From 4ac6204d2cbb44bec8ed0d4e216dfe7b19d97a6d Mon Sep 17 00:00:00 2001 From: metricaez Date: Tue, 16 Dec 2025 21:11:09 -0300 Subject: [PATCH 01/69] feat: KeyToIncludeInRelayProofApi --- Cargo.lock | 1 + cumulus/client/consensus/aura/src/collator.rs | 4 + .../consensus/aura/src/collators/basic.rs | 1 + .../consensus/aura/src/collators/lookahead.rs | 17 +++- .../consensus/aura/src/collators/mod.rs | 32 ++++++- .../slot_based/block_builder_task.rs | 2 +- .../aura/src/collators/slot_based/tests.rs | 9 ++ cumulus/client/parachain-inherent/src/lib.rs | 91 ++++++++++++++++++- .../src/lib.rs | 12 +++ .../client/relay-chain-interface/Cargo.toml | 1 + .../client/relay-chain-interface/src/lib.rs | 18 ++++ .../relay-chain-rpc-interface/src/lib.rs | 14 +++ cumulus/pallets/aura-ext/src/test.rs | 1 + cumulus/pallets/parachain-system/src/lib.rs | 10 ++ cumulus/pallets/parachain-system/src/mock.rs | 1 + .../src/relay_state_snapshot.rs | 31 +++++++ cumulus/pallets/xcmp-queue/src/mock.rs | 1 + .../assets/asset-hub-rococo/src/lib.rs | 1 + .../assets/asset-hub-westend/src/lib.rs | 1 + .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 1 + .../bridge-hubs/bridge-hub-westend/src/lib.rs | 1 + .../collectives-westend/src/lib.rs | 1 + .../coretime/coretime-westend/src/lib.rs | 1 + .../glutton/glutton-westend/src/lib.rs | 1 + .../runtimes/people/people-westend/src/lib.rs | 1 + .../runtimes/testing/penpal/src/lib.rs | 1 + .../testing/yet-another-parachain/src/lib.rs | 1 + .../polkadot-omni-node/lib/src/common/aura.rs | 3 + .../lib/src/fake_runtime_api/utils.rs | 7 ++ cumulus/primitives/core/src/lib.rs | 44 +++++++++ cumulus/test/runtime/src/lib.rs | 7 ++ 31 files changed, 309 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a959ef0c98433..3ac99dd9da2ab 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4870,6 +4870,7 @@ dependencies = [ "sp-api", "sp-blockchain", "sp-state-machine", + "sp-storage 19.0.0", "sp-version", "thiserror 1.0.65", ] diff --git 
a/cumulus/client/consensus/aura/src/collator.rs b/cumulus/client/consensus/aura/src/collator.rs index 3999352322e20..2fab2f7e424d9 100644 --- a/cumulus/client/consensus/aura/src/collator.rs +++ b/cumulus/client/consensus/aura/src/collator.rs @@ -177,6 +177,7 @@ where parent_hash: Block::Hash, timestamp: impl Into>, relay_parent_descendants: Option, + relay_proof_request: cumulus_primitives_core::RelayProofRequest, collator_peer_id: PeerId, ) -> Result<(ParachainInherentData, InherentData), Box> { let paras_inherent_data = ParachainInherentDataProvider::create_at( @@ -188,6 +189,7 @@ where .map(RelayParentData::into_inherent_descendant_list) .unwrap_or_default(), Vec::new(), + relay_proof_request, collator_peer_id, ) .await; @@ -224,6 +226,7 @@ where validation_data: &PersistedValidationData, parent_hash: Block::Hash, timestamp: impl Into>, + relay_proof_request: cumulus_primitives_core::RelayProofRequest, collator_peer_id: PeerId, ) -> Result<(ParachainInherentData, InherentData), Box> { self.create_inherent_data_with_rp_offset( @@ -232,6 +235,7 @@ where parent_hash, timestamp, None, + relay_proof_request, collator_peer_id, ) .await diff --git a/cumulus/client/consensus/aura/src/collators/basic.rs b/cumulus/client/consensus/aura/src/collators/basic.rs index 1f99e2f6e5cc0..532da7ede18e3 100644 --- a/cumulus/client/consensus/aura/src/collators/basic.rs +++ b/cumulus/client/consensus/aura/src/collators/basic.rs @@ -238,6 +238,7 @@ where &validation_data, parent_hash, claim.timestamp(), + Default::default(), params.collator_peer_id, ) .await diff --git a/cumulus/client/consensus/aura/src/collators/lookahead.rs b/cumulus/client/consensus/aura/src/collators/lookahead.rs index 303b5268095c5..c33ce1c41d6c9 100644 --- a/cumulus/client/consensus/aura/src/collators/lookahead.rs +++ b/cumulus/client/consensus/aura/src/collators/lookahead.rs @@ -36,7 +36,7 @@ use codec::{Codec, Encode}; use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; use 
cumulus_client_consensus_common::{self as consensus_common, ParachainBlockImportMarker}; use cumulus_primitives_aura::AuraUnincludedSegmentApi; -use cumulus_primitives_core::{CollectCollationInfo, PersistedValidationData}; +use cumulus_primitives_core::{CollectCollationInfo, KeyToIncludeInRelayProofApi, PersistedValidationData}; use cumulus_relay_chain_interface::RelayChainInterface; use sp_consensus::Environment; @@ -164,8 +164,10 @@ where + Send + Sync + 'static, - Client::Api: - AuraApi + CollectCollationInfo + AuraUnincludedSegmentApi, + Client::Api: AuraApi + + CollectCollationInfo + + AuraUnincludedSegmentApi + + KeyToIncludeInRelayProofApi, Backend: sc_client_api::Backend + 'static, RClient: RelayChainInterface + Clone + 'static, CIDP: CreateInherentDataProviders + 'static, @@ -216,8 +218,10 @@ where + Send + Sync + 'static, - Client::Api: - AuraApi + CollectCollationInfo + AuraUnincludedSegmentApi, + Client::Api: AuraApi + + CollectCollationInfo + + AuraUnincludedSegmentApi + + KeyToIncludeInRelayProofApi, Backend: sc_client_api::Backend + 'static, RClient: RelayChainInterface + Clone + 'static, CIDP: CreateInherentDataProviders + 'static, @@ -392,12 +396,15 @@ where // Build and announce collations recursively until // `can_build_upon` fails or building a collation fails. 
+ let relay_proof_request = super::get_relay_proof_request(&*params.para_client, parent_hash); + let (parachain_inherent_data, other_inherent_data) = match collator .create_inherent_data( relay_parent, &validation_data, parent_hash, slot_claim.timestamp(), + relay_proof_request, params.collator_peer_id, ) .await diff --git a/cumulus/client/consensus/aura/src/collators/mod.rs b/cumulus/client/consensus/aura/src/collators/mod.rs index d938dca69282f..0b2981691fc24 100644 --- a/cumulus/client/consensus/aura/src/collators/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/mod.rs @@ -25,7 +25,9 @@ use crate::collator::SlotClaim; use codec::Codec; use cumulus_client_consensus_common::{self as consensus_common, ParentSearchParams}; use cumulus_primitives_aura::{AuraUnincludedSegmentApi, Slot}; -use cumulus_primitives_core::{relay_chain::Header as RelayHeader, BlockT}; +use cumulus_primitives_core::{ + relay_chain::Header as RelayHeader, BlockT, KeyToIncludeInRelayProofApi, +}; use cumulus_relay_chain_interface::{OverseerHandle, RelayChainInterface}; use polkadot_node_subsystem::messages::{CollatorProtocolMessage, RuntimeApiRequest}; use polkadot_node_subsystem_util::runtime::ClaimQueueSnapshot; @@ -662,6 +664,34 @@ mod tests { } } +/// Fetches relay chain storage proof requests from the parachain runtime. +/// +/// Queries the runtime API to determine which relay chain storage keys +/// (both top-level and child trie keys) should be included in the relay chain state proof. +/// +/// Falls back to an empty request if the runtime API call fails or is not implemented. 
+fn get_relay_proof_request( + client: &Client, + parent_hash: Block::Hash, +) -> cumulus_primitives_core::RelayProofRequest +where + Block: BlockT, + Client: ProvideRuntimeApi, + Client::Api: KeyToIncludeInRelayProofApi, +{ + client + .runtime_api() + .keys_to_prove(parent_hash) + .unwrap_or_else(|e| { + tracing::warn!( + target: crate::LOG_TARGET, + error = ?e, + "Failed to fetch relay proof requests from runtime, using empty request" + ); + Default::default() + }) +} + /// Holds a relay parent and its descendants. pub struct RelayParentData { /// The relay parent block header diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index 4a58ed81426af..173550d995b63 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -359,7 +359,6 @@ where relay_parent_storage_root: *relay_parent_header.state_root(), max_pov_size: *max_pov_size, }; - let (parachain_inherent_data, other_inherent_data) = match collator .create_inherent_data_with_rp_offset( relay_parent, @@ -367,6 +366,7 @@ where parent_hash, slot_claim.timestamp(), Some(rp_data), + Default::default(), collator_peer_id, ) .await diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/tests.rs b/cumulus/client/consensus/aura/src/collators/slot_based/tests.rs index e0ba35e558afe..ef4ed09c6dc66 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/tests.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/tests.rs @@ -566,6 +566,15 @@ impl RelayChainInterface for TestRelayClient { unimplemented!("Not needed for test") } + async fn prove_child_read( + &self, + _: RelayHash, + _: &cumulus_relay_chain_interface::ChildInfo, + _: &[Vec], + ) -> RelayChainResult { + unimplemented!("Not needed for test") + } + async fn wait_for_block(&self, _: RelayHash) -> 
RelayChainResult<()> { unimplemented!("Not needed for test") } diff --git a/cumulus/client/parachain-inherent/src/lib.rs b/cumulus/client/parachain-inherent/src/lib.rs index 5e994cd472f70..8ea3c4f96c5ae 100644 --- a/cumulus/client/parachain-inherent/src/lib.rs +++ b/cumulus/client/parachain-inherent/src/lib.rs @@ -30,6 +30,8 @@ pub use cumulus_primitives_parachain_inherent::{ParachainInherentData, INHERENT_ use cumulus_relay_chain_interface::RelayChainInterface; pub use mock::{MockValidationDataInherentDataProvider, MockXcmConfig}; use sc_network_types::PeerId; +use sp_state_machine::StorageProof; +use sp_storage::ChildInfo; const LOG_TARGET: &str = "parachain-inherent"; @@ -157,6 +159,84 @@ async fn collect_relay_storage_proof( .ok() } +/// Collect storage proofs for relay chain data. +/// +/// Generates proofs for both top-level relay chain storage and child trie data. +/// Top-level keys are proven directly. Child trie roots are automatically included +/// from their standard storage locations (`:child_storage:default:` + identifier). +/// +/// Returns a merged proof combining all requested data, or `None` if there are no requests. 
+async fn collect_relay_storage_proofs( + relay_chain_interface: &impl RelayChainInterface, + relay_parent: PHash, + relay_proof_request: cumulus_primitives_core::RelayProofRequest, +) -> Option { + use cumulus_primitives_core::RelayStorageKey; + + let cumulus_primitives_core::RelayProofRequest { keys } = relay_proof_request; + + if keys.is_empty() { + return None; + } + + let mut combined_proof: Option = None; + + // Group keys by storage type + let mut top_keys = Vec::new(); + let mut child_keys: std::collections::BTreeMap, Vec>> = + std::collections::BTreeMap::new(); + + for key in keys { + match key { + RelayStorageKey::Top(k) => top_keys.push(k), + RelayStorageKey::Child { storage_key, key } => { + child_keys.entry(storage_key).or_default().push(key); + }, + } + } + + // Collect top-level storage proofs + if !top_keys.is_empty() { + match relay_chain_interface.prove_read(relay_parent, &top_keys).await { + Ok(top_proof) => { + combined_proof = Some(top_proof); + }, + Err(e) => { + tracing::error!( + target: LOG_TARGET, + relay_parent = ?relay_parent, + error = ?e, + "Cannot obtain top-level storage proof from relay chain.", + ); + }, + } + } + + // Collect child trie proofs + for (storage_key, data_keys) in child_keys { + let child_info = ChildInfo::new_default(&storage_key); + match relay_chain_interface.prove_child_read(relay_parent, &child_info, &data_keys).await { + Ok(child_proof) => { + combined_proof = match combined_proof { + None => Some(child_proof), + Some(existing) => Some(StorageProof::merge([existing, child_proof])), + }; + }, + Err(e) => { + tracing::error!( + target: LOG_TARGET, + relay_parent = ?relay_parent, + child_trie_id = ?child_info.storage_key(), + error = ?e, + "Cannot obtain child trie proof from relay chain.", + ); + }, + } + } + + combined_proof +} + pub struct ParachainInherentDataProvider; impl ParachainInherentDataProvider { @@ -170,6 +250,7 @@ impl ParachainInherentDataProvider { para_id: ParaId, relay_parent_descendants: Vec, 
additional_relay_state_keys: Vec>, + relay_proof_request: cumulus_primitives_core::RelayProofRequest, collator_peer_id: PeerId, ) -> Option { let collator_peer_id = ApprovedPeerId::try_from(collator_peer_id.to_bytes()) @@ -188,7 +269,7 @@ impl ParachainInherentDataProvider { .iter() .skip(1) .any(sc_consensus_babe::contains_epoch_change::); - let relay_chain_state = collect_relay_storage_proof( + let mut relay_chain_state = collect_relay_storage_proof( relay_chain_interface, para_id, relay_parent, @@ -198,6 +279,14 @@ impl ParachainInherentDataProvider { ) .await?; + // Collect additional requested storage proofs (top-level and child tries) + if let Some(additional_proofs) = + collect_relay_storage_proofs(relay_chain_interface, relay_parent, relay_proof_request) + .await + { + relay_chain_state = StorageProof::merge([relay_chain_state, additional_proofs]); + } + let downward_messages = relay_chain_interface .retrieve_dmq_contents(para_id, relay_parent) .await diff --git a/cumulus/client/relay-chain-inprocess-interface/src/lib.rs b/cumulus/client/relay-chain-inprocess-interface/src/lib.rs index b989f81efd5dc..f7b3f810b6015 100644 --- a/cumulus/client/relay-chain-inprocess-interface/src/lib.rs +++ b/cumulus/client/relay-chain-inprocess-interface/src/lib.rs @@ -240,6 +240,18 @@ impl RelayChainInterface for RelayChainInProcessInterface { .map_err(RelayChainError::StateMachineError) } + async fn prove_child_read( + &self, + relay_parent: PHash, + child_info: &cumulus_relay_chain_interface::ChildInfo, + child_keys: &[Vec], + ) -> RelayChainResult { + let state_backend = self.backend.state_at(relay_parent, TrieCacheContext::Untrusted)?; + + sp_state_machine::prove_child_read(state_backend, child_info, child_keys) + .map_err(RelayChainError::StateMachineError) + } + /// Wait for a given relay chain block in an async way. 
/// /// The caller needs to pass the hash of a block it waits for and the function will return when diff --git a/cumulus/client/relay-chain-interface/Cargo.toml b/cumulus/client/relay-chain-interface/Cargo.toml index be19f99526659..db89a573b3537 100644 --- a/cumulus/client/relay-chain-interface/Cargo.toml +++ b/cumulus/client/relay-chain-interface/Cargo.toml @@ -21,6 +21,7 @@ sc-network = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-state-machine = { workspace = true, default-features = true } +sp-storage = { workspace = true, default-features = true } sp-version = { workspace = true } async-trait = { workspace = true } diff --git a/cumulus/client/relay-chain-interface/src/lib.rs b/cumulus/client/relay-chain-interface/src/lib.rs index dd03738ed0029..8f87ccc6997b2 100644 --- a/cumulus/client/relay-chain-interface/src/lib.rs +++ b/cumulus/client/relay-chain-interface/src/lib.rs @@ -42,6 +42,7 @@ pub use cumulus_primitives_core::{ }; pub use polkadot_overseer::Handle as OverseerHandle; pub use sp_state_machine::StorageValue; +pub use sp_storage::ChildInfo; pub type RelayChainResult = Result; @@ -213,6 +214,14 @@ pub trait RelayChainInterface: Send + Sync { relevant_keys: &Vec>, ) -> RelayChainResult; + /// Generate a child trie storage read proof. + async fn prove_child_read( + &self, + relay_parent: PHash, + child_info: &ChildInfo, + child_keys: &[Vec], + ) -> RelayChainResult; + /// Returns the validation code hash for the given `para_id` using the given /// `occupied_core_assumption`. 
async fn validation_code_hash( @@ -354,6 +363,15 @@ where (**self).prove_read(relay_parent, relevant_keys).await } + async fn prove_child_read( + &self, + relay_parent: PHash, + child_info: &ChildInfo, + child_keys: &[Vec], + ) -> RelayChainResult { + (**self).prove_child_read(relay_parent, child_info, child_keys).await + } + async fn wait_for_block(&self, hash: PHash) -> RelayChainResult<()> { (**self).wait_for_block(hash).await } diff --git a/cumulus/client/relay-chain-rpc-interface/src/lib.rs b/cumulus/client/relay-chain-rpc-interface/src/lib.rs index 84d22676789cf..9c7732e6e452e 100644 --- a/cumulus/client/relay-chain-rpc-interface/src/lib.rs +++ b/cumulus/client/relay-chain-rpc-interface/src/lib.rs @@ -210,6 +210,20 @@ impl RelayChainInterface for RelayChainRpcInterface { }) } + async fn prove_child_read( + &self, + _relay_parent: RelayHash, + _child_info: &cumulus_relay_chain_interface::ChildInfo, + _child_keys: &[Vec], + ) -> RelayChainResult { + // Not implemented: requires relay chain RPC to expose child trie proof method. + tracing::warn!( + target: "relay-chain-rpc-interface", + "prove_child_read not implemented for RPC interface, returning empty proof" + ); + Ok(StorageProof::empty()) + } + /// Wait for a given relay chain block /// /// The hash of the block to wait for is passed. 
We wait for the block to arrive or return after diff --git a/cumulus/pallets/aura-ext/src/test.rs b/cumulus/pallets/aura-ext/src/test.rs index 7c4c78ab2a5b0..3486e56a5c2e4 100644 --- a/cumulus/pallets/aura-ext/src/test.rs +++ b/cumulus/pallets/aura-ext/src/test.rs @@ -151,6 +151,7 @@ impl cumulus_pallet_parachain_system::Config for Test { type CheckAssociatedRelayNumber = AnyRelayNumber; type ConsensusHook = ExpectParentIncluded; type RelayParentOffset = ConstU32<0>; + type RelayProofKeysProcessor = (); } fn set_ancestors() { diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index 5ff4af131f565..ed55293a88db0 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -85,6 +85,7 @@ use unincluded_segment::{ }; pub use consensus_hook::{ConsensusHook, ExpectParentIncluded}; +pub use relay_state_snapshot::ProcessRelayProofKeys; /// Register the `validate_block` function that is used by parachains to validate blocks on a /// validator. /// @@ -263,6 +264,13 @@ pub mod pallet { /// /// If set to 0, this config has no impact. type RelayParentOffset: Get; + + /// Processor for relay chain proof keys. + /// + /// This allows parachains to process data from the relay chain state proof, + /// including both child trie keys and main trie keys that were requested + /// via `KeyToIncludeInRelayProofApi`. 
+ type RelayProofKeysProcessor: relay_state_snapshot::ProcessRelayProofKeys; } #[pallet::hooks] @@ -701,6 +709,8 @@ pub mod pallet { >::put(relevant_messaging_state.clone()); >::put(host_config); + total_weight.saturating_accrue(T::RelayProofKeysProcessor::process_relay_proof_keys(&relay_state_proof)); + ::on_validation_data(&vfp); if let Some(collator_peer_id) = collator_peer_id { diff --git a/cumulus/pallets/parachain-system/src/mock.rs b/cumulus/pallets/parachain-system/src/mock.rs index d3c7cef52b637..b361031be2c37 100644 --- a/cumulus/pallets/parachain-system/src/mock.rs +++ b/cumulus/pallets/parachain-system/src/mock.rs @@ -99,6 +99,7 @@ impl Config for Test { type ConsensusHook = TestConsensusHook; type WeightInfo = (); type RelayParentOffset = ConstU32<0>; + type RelayProofKeysProcessor = (); } std::thread_local! { diff --git a/cumulus/pallets/parachain-system/src/relay_state_snapshot.rs b/cumulus/pallets/parachain-system/src/relay_state_snapshot.rs index 7138d61edd277..7c6efb5ddf73e 100644 --- a/cumulus/pallets/parachain-system/src/relay_state_snapshot.rs +++ b/cumulus/pallets/parachain-system/src/relay_state_snapshot.rs @@ -21,11 +21,26 @@ use codec::{Decode, Encode}; use cumulus_primitives_core::{ relay_chain, AbridgedHostConfiguration, AbridgedHrmpChannel, ParaId, }; +use frame_support::weights::Weight; use scale_info::TypeInfo; use sp_runtime::traits::HashingFor; use sp_state_machine::{Backend, TrieBackend, TrieBackendBuilder}; use sp_trie::{HashDBT, MemoryDB, StorageProof, EMPTY_PREFIX}; +/// Process keys from verified relay chain state proofs. +/// +/// This trait allows processing of relay chain storage data from the verified proof. +pub trait ProcessRelayProofKeys { + /// Process keys from a verified relay state proof. 
+ fn process_relay_proof_keys(verified_proof: &RelayChainStateProof) -> Weight; +} + +impl ProcessRelayProofKeys for () { + fn process_relay_proof_keys(_verified_proof: &RelayChainStateProof) -> Weight { + Weight::zero() + } +} + /// The capacity of the upward message queue of a parachain on the relay chain. // The field order should stay the same as the data can be found in the proof to ensure both are // have the same encoded representation. @@ -383,4 +398,20 @@ impl RelayChainStateProof { { read_optional_entry(&self.trie_backend, key).map_err(Error::ReadOptionalEntry) } + + /// Read a value from a child trie in the relay chain state proof. + /// + /// Returns `Ok(Some(value))` if the key exists in the child trie, + /// `Ok(None)` if the key doesn't exist, + /// or `Err` if there was a proof error. + pub fn read_child_storage( + &self, + child_info: &sp_core::storage::ChildInfo, + key: &[u8], + ) -> Result>, Error> { + use sp_state_machine::Backend; + self.trie_backend + .child_storage(child_info, key) + .map_err(|_| Error::ReadEntry(ReadEntryErr::Proof)) + } } diff --git a/cumulus/pallets/xcmp-queue/src/mock.rs b/cumulus/pallets/xcmp-queue/src/mock.rs index 3be87221c052e..1e32c9003a948 100644 --- a/cumulus/pallets/xcmp-queue/src/mock.rs +++ b/cumulus/pallets/xcmp-queue/src/mock.rs @@ -106,6 +106,7 @@ impl cumulus_pallet_parachain_system::Config for Test { type CheckAssociatedRelayNumber = AnyRelayNumber; type ConsensusHook = cumulus_pallet_parachain_system::consensus_hook::ExpectParentIncluded; type RelayParentOffset = ConstU32<0>; + type RelayProofKeysProcessor = (); } parameter_types! 
{ diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 69c7a9e544326..720ebe8ee2b4b 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -745,6 +745,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32<0>; + type RelayProofKeysProcessor = (); } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index 8b9cbf66015c7..f0cb527c42f09 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -903,6 +903,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32<0>; + type RelayProofKeysProcessor = (); } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index cfc58a2a4f6eb..dac8fc398127b 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -400,6 +400,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32<0>; + type RelayProofKeysProcessor = (); } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< 
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 62532fac5fec3..625c2ebe24507 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -392,6 +392,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32<0>; + type RelayProofKeysProcessor = (); } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index ba70aabd9d0cf..37edc0f02329c 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -423,6 +423,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32<0>; + type RelayProofKeysProcessor = (); } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index ed8748a34933a..fb72ae7a59586 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -305,6 +305,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32<0>; + type RelayProofKeysProcessor = (); } type ConsensusHook = 
cumulus_pallet_aura_ext::FixedVelocityConsensusHook< diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs index eadc4d289fe9d..98131ac4ef58a 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs @@ -191,6 +191,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type ConsensusHook = ConsensusHook; type WeightInfo = weights::cumulus_pallet_parachain_system::WeightInfo; type RelayParentOffset = ConstU32<0>; + type RelayProofKeysProcessor = (); } parameter_types! { diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index dc4a616f02d1c..427ff1454d8fd 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -281,6 +281,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type ConsensusHook = ConsensusHook; type WeightInfo = weights::cumulus_pallet_parachain_system::WeightInfo; type RelayParentOffset = ConstU32<0>; + type RelayProofKeysProcessor = (); } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs index bc018b160f778..4d1ee6a0d2254 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs @@ -664,6 +664,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { >; type RelayParentOffset = ConstU32<0>; + type RelayProofKeysProcessor = (); } impl parachain_info::Config for Runtime {} diff --git a/cumulus/parachains/runtimes/testing/yet-another-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/yet-another-parachain/src/lib.rs index 0f5d2ecd99494..1c5ead9e7b90b 100644 --- 
a/cumulus/parachains/runtimes/testing/yet-another-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/yet-another-parachain/src/lib.rs @@ -368,6 +368,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32; + type RelayProofKeysProcessor = (); } impl pallet_message_queue::Config for Runtime { diff --git a/cumulus/polkadot-omni-node/lib/src/common/aura.rs b/cumulus/polkadot-omni-node/lib/src/common/aura.rs index 9ca725ff3279a..10a631306b33a 100644 --- a/cumulus/polkadot-omni-node/lib/src/common/aura.rs +++ b/cumulus/polkadot-omni-node/lib/src/common/aura.rs @@ -18,6 +18,7 @@ use codec::Codec; use cumulus_primitives_aura::AuraUnincludedSegmentApi; +use cumulus_primitives_core::KeyToIncludeInRelayProofApi; use sp_consensus_aura::AuraApi; use sp_runtime::{ app_crypto::{AppCrypto, AppPair, AppSignature, Pair}, @@ -53,6 +54,7 @@ pub trait AuraRuntimeApi: sp_api::ApiExt + AuraApi::Public> + AuraUnincludedSegmentApi + + KeyToIncludeInRelayProofApi + Sized { /// Check if the runtime has the Aura API. @@ -66,5 +68,6 @@ impl AuraRuntimeApi for T wher T: sp_api::ApiExt + AuraApi::Public> + AuraUnincludedSegmentApi + + KeyToIncludeInRelayProofApi { } diff --git a/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs b/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs index 56eb3d2ae7602..9d60924f55b84 100644 --- a/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs +++ b/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs @@ -175,6 +175,13 @@ macro_rules! 
impl_node_runtime_apis { unimplemented!() } } + + impl cumulus_primitives_core::KeyToIncludeInRelayProofApi<$block> for $runtime { + fn keys_to_prove() -> cumulus_primitives_core::RelayProofRequest { + unimplemented!() + } + } + #[cfg(feature = "try-runtime")] impl frame_try_runtime::TryRuntime<$block> for $runtime { fn on_runtime_upgrade( diff --git a/cumulus/primitives/core/src/lib.rs b/cumulus/primitives/core/src/lib.rs index 774961b6b7e6b..d4dc8ccb8dc4e 100644 --- a/cumulus/primitives/core/src/lib.rs +++ b/cumulus/primitives/core/src/lib.rs @@ -466,6 +466,33 @@ pub struct CollationInfo { pub head_data: HeadData, } +/// A relay chain storage key to be included in the storage proof. +#[derive(Clone, Debug, Encode, Decode, TypeInfo, PartialEq, Eq)] +pub enum RelayStorageKey { + /// Top-level relay chain storage key. + Top(Vec), + /// Child trie storage key. + Child { + /// Unprefixed storage key identifying the child trie root location. + /// Prefix `:child_storage:default:` is added when accessing storage. + /// Used to derive `ChildInfo` for reading child trie data. + /// Usage: let child_info = ChildInfo::new_default(&storage_key); + storage_key: Vec, + /// Key within the child trie. + key: Vec, + }, +} + +/// Request for proving relay chain storage data. +/// +/// Contains a list of storage keys (either top-level or child trie keys) +/// to be included in the relay chain state proof. +#[derive(Clone, Debug, Encode, Decode, TypeInfo, PartialEq, Eq, Default)] +pub struct RelayProofRequest { + /// Storage keys to include in the relay chain state proof. + pub keys: Vec, +} + sp_api::decl_runtime_apis! { /// Runtime api to collect information about a collation. /// @@ -513,4 +540,21 @@ sp_api::decl_runtime_apis! { /// Returns the target number of blocks per relay chain slot. fn target_block_rate() -> u32; } + + /// API for specifying which relay chain storage data to include in storage proofs. 
+ /// + /// This API allows parachains to request both top-level relay chain storage keys + /// and child trie storage keys to be included in the relay chain state proof. + pub trait KeyToIncludeInRelayProofApi { + /// Returns relay chain storage proof requests. + /// + /// The returned `RelayProofRequest` contains a list of storage keys where each key + /// can be either: + /// - `RelayStorageKey::Top`: Top-level relay chain storage key + /// - `RelayStorageKey::Child`: Child trie storage, containing the child trie identifier + /// and the key to prove from that child trie + /// + /// The collator generates proofs for these and includes them in the relay chain state proof. + fn keys_to_prove() -> RelayProofRequest; + } } diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index dfe37e10d05d7..c20113aa69b24 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -385,6 +385,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32; + type RelayProofKeysProcessor = (); } impl parachain_info::Config for Runtime {} @@ -641,6 +642,12 @@ impl_runtime_apis! { 1 } } + + impl cumulus_primitives_core::KeyToIncludeInRelayProofApi for Runtime { + fn keys_to_prove() -> cumulus_primitives_core::RelayProofRequest { + Default::default() + } + } } cumulus_pallet_parachain_system::register_validate_block! 
{ From 25fa5d4b27d46e2e0fa24b350637c3cc68dc8dc2 Mon Sep 17 00:00:00 2001 From: metricaez Date: Tue, 16 Dec 2025 22:50:21 -0300 Subject: [PATCH 02/69] chore: shorter imports --- cumulus/client/consensus/aura/src/collator.rs | 5 +++-- cumulus/client/consensus/aura/src/collators/mod.rs | 4 ++-- cumulus/client/parachain-inherent/src/lib.rs | 12 +++++------- .../relay-chain-inprocess-interface/src/lib.rs | 6 ++++-- cumulus/client/relay-chain-rpc-interface/src/lib.rs | 9 ++++----- .../lib/src/fake_runtime_api/utils.rs | 4 ++-- cumulus/test/runtime/src/lib.rs | 4 ++-- 7 files changed, 22 insertions(+), 22 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collator.rs b/cumulus/client/consensus/aura/src/collator.rs index 2fab2f7e424d9..9a7cdadba5b36 100644 --- a/cumulus/client/consensus/aura/src/collator.rs +++ b/cumulus/client/consensus/aura/src/collator.rs @@ -33,6 +33,7 @@ use cumulus_client_consensus_common::{ use cumulus_client_parachain_inherent::{ParachainInherentData, ParachainInherentDataProvider}; use cumulus_primitives_core::{ relay_chain::Hash as PHash, DigestItem, ParachainBlockData, PersistedValidationData, + RelayProofRequest, }; use cumulus_relay_chain_interface::RelayChainInterface; use sc_client_api::BackendTransaction; @@ -177,7 +178,7 @@ where parent_hash: Block::Hash, timestamp: impl Into>, relay_parent_descendants: Option, - relay_proof_request: cumulus_primitives_core::RelayProofRequest, + relay_proof_request: RelayProofRequest, collator_peer_id: PeerId, ) -> Result<(ParachainInherentData, InherentData), Box> { let paras_inherent_data = ParachainInherentDataProvider::create_at( @@ -226,7 +227,7 @@ where validation_data: &PersistedValidationData, parent_hash: Block::Hash, timestamp: impl Into>, - relay_proof_request: cumulus_primitives_core::RelayProofRequest, + relay_proof_request: RelayProofRequest, collator_peer_id: PeerId, ) -> Result<(ParachainInherentData, InherentData), Box> { self.create_inherent_data_with_rp_offset( diff --git
a/cumulus/client/consensus/aura/src/collators/mod.rs b/cumulus/client/consensus/aura/src/collators/mod.rs index 0b2981691fc24..5fcb0bee7bf63 100644 --- a/cumulus/client/consensus/aura/src/collators/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/mod.rs @@ -26,7 +26,7 @@ use codec::Codec; use cumulus_client_consensus_common::{self as consensus_common, ParentSearchParams}; use cumulus_primitives_aura::{AuraUnincludedSegmentApi, Slot}; use cumulus_primitives_core::{ - relay_chain::Header as RelayHeader, BlockT, KeyToIncludeInRelayProofApi, + relay_chain::Header as RelayHeader, BlockT, KeyToIncludeInRelayProofApi, RelayProofRequest, }; use cumulus_relay_chain_interface::{OverseerHandle, RelayChainInterface}; use polkadot_node_subsystem::messages::{CollatorProtocolMessage, RuntimeApiRequest}; @@ -673,7 +673,7 @@ mod tests { fn get_relay_proof_request( client: &Client, parent_hash: Block::Hash, -) -> cumulus_primitives_core::RelayProofRequest +) -> RelayProofRequest where Block: BlockT, Client: ProvideRuntimeApi, diff --git a/cumulus/client/parachain-inherent/src/lib.rs b/cumulus/client/parachain-inherent/src/lib.rs index 8ea3c4f96c5ae..b002b5c1fa59f 100644 --- a/cumulus/client/parachain-inherent/src/lib.rs +++ b/cumulus/client/parachain-inherent/src/lib.rs @@ -24,7 +24,7 @@ use cumulus_primitives_core::{ self, ApprovedPeerId, Block as RelayBlock, Hash as PHash, Header as RelayHeader, HrmpChannelId, }, - ParaId, PersistedValidationData, + ParaId, PersistedValidationData, RelayProofRequest, RelayStorageKey, }; pub use cumulus_primitives_parachain_inherent::{ParachainInherentData, INHERENT_IDENTIFIER}; use cumulus_relay_chain_interface::RelayChainInterface; @@ -44,7 +44,7 @@ async fn collect_relay_storage_proof( include_authorities: bool, include_next_authorities: bool, additional_relay_state_keys: Vec>, -) -> Option { +) -> Option { use relay_chain::well_known_keys as relay_well_known_keys; let ingress_channels = relay_chain_interface @@ -169,11 +169,9 @@ async 
fn collect_relay_storage_proof( async fn collect_relay_storage_proofs( relay_chain_interface: &impl RelayChainInterface, relay_parent: PHash, - relay_proof_request: cumulus_primitives_core::RelayProofRequest, + relay_proof_request: RelayProofRequest, ) -> Option { - use cumulus_primitives_core::RelayStorageKey; - - let cumulus_primitives_core::RelayProofRequest { keys } = relay_proof_request; + let RelayProofRequest { keys } = relay_proof_request; if keys.is_empty() { return None; @@ -250,7 +248,7 @@ impl ParachainInherentDataProvider { para_id: ParaId, relay_parent_descendants: Vec, additional_relay_state_keys: Vec>, - relay_proof_request: cumulus_primitives_core::RelayProofRequest, + relay_proof_request: RelayProofRequest, collator_peer_id: PeerId, ) -> Option { let collator_peer_id = ApprovedPeerId::try_from(collator_peer_id.to_bytes()) diff --git a/cumulus/client/relay-chain-inprocess-interface/src/lib.rs b/cumulus/client/relay-chain-inprocess-interface/src/lib.rs index f7b3f810b6015..02726367b7fe5 100644 --- a/cumulus/client/relay-chain-inprocess-interface/src/lib.rs +++ b/cumulus/client/relay-chain-inprocess-interface/src/lib.rs @@ -33,7 +33,9 @@ use cumulus_primitives_core::{ }, InboundDownwardMessage, ParaId, PersistedValidationData, }; -use cumulus_relay_chain_interface::{RelayChainError, RelayChainInterface, RelayChainResult}; +use cumulus_relay_chain_interface::{ + ChildInfo, RelayChainError, RelayChainInterface, RelayChainResult, +}; use futures::{FutureExt, Stream, StreamExt}; use polkadot_primitives::CandidateEvent; use polkadot_service::{ @@ -243,7 +245,7 @@ impl RelayChainInterface for RelayChainInProcessInterface { async fn prove_child_read( &self, relay_parent: PHash, - child_info: &cumulus_relay_chain_interface::ChildInfo, + child_info: &ChildInfo, child_keys: &[Vec], ) -> RelayChainResult { let state_backend = self.backend.state_at(relay_parent, TrieCacheContext::Untrusted)?; diff --git a/cumulus/client/relay-chain-rpc-interface/src/lib.rs 
b/cumulus/client/relay-chain-rpc-interface/src/lib.rs index 9c7732e6e452e..7960444060e38 100644 --- a/cumulus/client/relay-chain-rpc-interface/src/lib.rs +++ b/cumulus/client/relay-chain-rpc-interface/src/lib.rs @@ -26,7 +26,8 @@ use cumulus_primitives_core::{ InboundDownwardMessage, ParaId, PersistedValidationData, }; use cumulus_relay_chain_interface::{ - BlockNumber, CoreState, PHeader, RelayChainError, RelayChainInterface, RelayChainResult, + BlockNumber, ChildInfo, CoreIndex, CoreState, PHeader, RelayChainError, RelayChainInterface, + RelayChainResult, }; use futures::{FutureExt, Stream, StreamExt}; use polkadot_overseer::Handle; @@ -213,7 +214,7 @@ impl RelayChainInterface for RelayChainRpcInterface { async fn prove_child_read( &self, _relay_parent: RelayHash, - _child_info: &cumulus_relay_chain_interface::ChildInfo, + _child_info: &ChildInfo, _child_keys: &[Vec], ) -> RelayChainResult { // Not implemented: requires relay chain RPC to expose child trie proof method. @@ -287,9 +288,7 @@ impl RelayChainInterface for RelayChainRpcInterface { async fn claim_queue( &self, relay_parent: RelayHash, - ) -> RelayChainResult< - BTreeMap>, - > { + ) -> RelayChainResult>> { self.rpc_client.parachain_host_claim_queue(relay_parent).await } diff --git a/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs b/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs index 9d60924f55b84..ab0a9e1826739 100644 --- a/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs +++ b/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs @@ -15,7 +15,7 @@ // limitations under the License. pub(crate) mod imports { - pub use cumulus_primitives_core::ParaId; + pub use cumulus_primitives_core::{ParaId, RelayProofRequest}; pub use parachains_common_types::{AccountId, Balance, Nonce}; pub use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; pub use sp_runtime::{ @@ -177,7 +177,7 @@ macro_rules! 
impl_node_runtime_apis { } impl cumulus_primitives_core::KeyToIncludeInRelayProofApi<$block> for $runtime { - fn keys_to_prove() -> cumulus_primitives_core::RelayProofRequest { + fn keys_to_prove() -> RelayProofRequest { unimplemented!() } } diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index c20113aa69b24..1492ef02b8370 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -87,7 +87,7 @@ use sp_runtime::{ use sp_version::NativeVersion; use sp_version::RuntimeVersion; -use cumulus_primitives_core::ParaId; +use cumulus_primitives_core::{ParaId, RelayProofRequest}; // A few exports that help ease life for downstream crates. pub use frame_support::{ @@ -644,7 +644,7 @@ impl_runtime_apis! { } impl cumulus_primitives_core::KeyToIncludeInRelayProofApi for Runtime { - fn keys_to_prove() -> cumulus_primitives_core::RelayProofRequest { + fn keys_to_prove() -> RelayProofRequest { Default::default() } } From b9fac0d4ad458dadbba65ff1144bea0b50f3ad72 Mon Sep 17 00:00:00 2001 From: metricaez Date: Wed, 17 Dec 2025 08:51:39 -0300 Subject: [PATCH 03/69] feat: better naming and batch merging of proofs --- cumulus/client/parachain-inherent/src/lib.rs | 25 +++++++++++--------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/cumulus/client/parachain-inherent/src/lib.rs b/cumulus/client/parachain-inherent/src/lib.rs index b002b5c1fa59f..093bc6990245e 100644 --- a/cumulus/client/parachain-inherent/src/lib.rs +++ b/cumulus/client/parachain-inherent/src/lib.rs @@ -159,14 +159,14 @@ async fn collect_relay_storage_proof( .ok() } -/// Collect storage proofs for relay chain data. +/// Collect additional storage proofs requested by the runtime. /// /// Generates proofs for both top-level relay chain storage and child trie data. /// Top-level keys are proven directly. Child trie roots are automatically included /// from their standard storage locations (`:child_storage:default:` + identifier). 
/// /// Returns a merged proof combining all requested data, or `None` if there are no requests. -async fn collect_relay_storage_proofs( +async fn collect_additional_storage_proofs( relay_chain_interface: &impl RelayChainInterface, relay_parent: PHash, relay_proof_request: RelayProofRequest, @@ -177,8 +177,6 @@ async fn collect_relay_storage_proofs( return None; } - let mut combined_proof: Option = None; - // Group keys by storage type let mut top_keys = Vec::new(); let mut child_keys: std::collections::BTreeMap, Vec>> = @@ -193,11 +191,14 @@ async fn collect_relay_storage_proofs( } } + // Collect all storage proofs + let mut all_proofs = Vec::new(); + // Collect top-level storage proofs if !top_keys.is_empty() { match relay_chain_interface.prove_read(relay_parent, &top_keys).await { Ok(top_proof) => { - combined_proof = Some(top_proof); + all_proofs.push(top_proof); }, Err(e) => { tracing::error!( @@ -215,10 +216,7 @@ async fn collect_relay_storage_proofs( let child_info = ChildInfo::new_default(&storage_key); match relay_chain_interface.prove_child_read(relay_parent, &child_info, &data_keys).await { Ok(child_proof) => { - combined_proof = match combined_proof { - None => Some(child_proof), - Some(existing) => Some(StorageProof::merge([existing, child_proof])), - }; + all_proofs.push(child_proof); }, Err(e) => { tracing::error!( @@ -232,7 +230,12 @@ async fn collect_relay_storage_proofs( } } - combined_proof + // Merge all proofs + if all_proofs.is_empty() { + None + } else { + Some(StorageProof::merge(all_proofs)) + } } pub struct ParachainInherentDataProvider; @@ -279,7 +282,7 @@ impl ParachainInherentDataProvider { // Collect additional requested storage proofs (top-level and child tries) if let Some(additional_proofs) = - collect_relay_storage_proofs(relay_chain_interface, relay_parent, relay_proof_request) + collect_additional_storage_proofs(relay_chain_interface, relay_parent, relay_proof_request) .await { relay_chain_state = 
StorageProof::merge([relay_chain_state, additional_proofs]); From a9f3bc7f5e32ce2ee0a123958adeea2ec3c9c3ef Mon Sep 17 00:00:00 2001 From: metricaez Date: Fri, 19 Dec 2025 13:41:21 -0300 Subject: [PATCH 04/69] feat: naming and comment suggestions --- cumulus/primitives/core/src/lib.rs | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/cumulus/primitives/core/src/lib.rs b/cumulus/primitives/core/src/lib.rs index d4dc8ccb8dc4e..fddf4ffa8e730 100644 --- a/cumulus/primitives/core/src/lib.rs +++ b/cumulus/primitives/core/src/lib.rs @@ -545,16 +545,11 @@ sp_api::decl_runtime_apis! { /// /// This API allows parachains to request both top-level relay chain storage keys /// and child trie storage keys to be included in the relay chain state proof. - pub trait KeyToIncludeInRelayProofApi { + pub trait KeyToIncludeInRelayProof { /// Returns relay chain storage proof requests. /// - /// The returned `RelayProofRequest` contains a list of storage keys where each key - /// can be either: - /// - `RelayStorageKey::Top`: Top-level relay chain storage key - /// - `RelayStorageKey::Child`: Child trie storage, containing the child trie identifier - /// and the key to prove from that child trie - /// - /// The collator generates proofs for these and includes them in the relay chain state proof. + /// + /// The collator will include the requested keys in the relay chain proof that is passed alongside the parachain inherent into the runtime.
fn keys_to_prove() -> RelayProofRequest; } } From f8c4aea1c2993c4e7a4bafb27d58f47245dd9852 Mon Sep 17 00:00:00 2001 From: metricaez Date: Fri, 19 Dec 2025 14:31:57 -0300 Subject: [PATCH 05/69] fix: name change occurrences fix --- cumulus/client/consensus/aura/src/collators/lookahead.rs | 6 +++--- cumulus/client/consensus/aura/src/collators/mod.rs | 4 ++-- cumulus/pallets/parachain-system/src/lib.rs | 2 +- cumulus/polkadot-omni-node/lib/src/common/aura.rs | 6 +++--- .../polkadot-omni-node/lib/src/fake_runtime_api/utils.rs | 2 +- cumulus/test/runtime/src/lib.rs | 2 +- 6 files changed, 11 insertions(+), 11 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/lookahead.rs b/cumulus/client/consensus/aura/src/collators/lookahead.rs index c33ce1c41d6c9..f20a64abdfc7b 100644 --- a/cumulus/client/consensus/aura/src/collators/lookahead.rs +++ b/cumulus/client/consensus/aura/src/collators/lookahead.rs @@ -36,7 +36,7 @@ use codec::{Codec, Encode}; use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; use cumulus_client_consensus_common::{self as consensus_common, ParachainBlockImportMarker}; use cumulus_primitives_aura::AuraUnincludedSegmentApi; -use cumulus_primitives_core::{CollectCollationInfo, KeyToIncludeInRelayProofApi, PersistedValidationData}; +use cumulus_primitives_core::{CollectCollationInfo, KeyToIncludeInRelayProof, PersistedValidationData}; use cumulus_relay_chain_interface::RelayChainInterface; use sp_consensus::Environment; @@ -167,7 +167,7 @@ where Client::Api: AuraApi + CollectCollationInfo + AuraUnincludedSegmentApi - + KeyToIncludeInRelayProofApi, + + KeyToIncludeInRelayProof, Backend: sc_client_api::Backend + 'static, RClient: RelayChainInterface + Clone + 'static, CIDP: CreateInherentDataProviders + 'static, @@ -221,7 +221,7 @@ where Client::Api: AuraApi + CollectCollationInfo + AuraUnincludedSegmentApi - + KeyToIncludeInRelayProofApi, + + KeyToIncludeInRelayProof, Backend: sc_client_api::Backend + 'static,
RClient: RelayChainInterface + Clone + 'static, CIDP: CreateInherentDataProviders + 'static, diff --git a/cumulus/client/consensus/aura/src/collators/mod.rs b/cumulus/client/consensus/aura/src/collators/mod.rs index 5fcb0bee7bf63..e22d77c3d1b0f 100644 --- a/cumulus/client/consensus/aura/src/collators/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/mod.rs @@ -26,7 +26,7 @@ use codec::Codec; use cumulus_client_consensus_common::{self as consensus_common, ParentSearchParams}; use cumulus_primitives_aura::{AuraUnincludedSegmentApi, Slot}; use cumulus_primitives_core::{ - relay_chain::Header as RelayHeader, BlockT, KeyToIncludeInRelayProofApi, RelayProofRequest, + relay_chain::Header as RelayHeader, BlockT, KeyToIncludeInRelayProof, RelayProofRequest, }; use cumulus_relay_chain_interface::{OverseerHandle, RelayChainInterface}; use polkadot_node_subsystem::messages::{CollatorProtocolMessage, RuntimeApiRequest}; @@ -677,7 +677,7 @@ fn get_relay_proof_request( where Block: BlockT, Client: ProvideRuntimeApi, - Client::Api: KeyToIncludeInRelayProofApi, + Client::Api: KeyToIncludeInRelayProof, { client .runtime_api() diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index ed55293a88db0..1b26dead9bc02 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -269,7 +269,7 @@ pub mod pallet { /// /// This allows parachains to process data from the relay chain state proof, /// including both child trie keys and main trie keys that were requested - /// via `KeyToIncludeInRelayProofApi`. + /// via `KeyToIncludeInRelayProof`. 
type RelayProofKeysProcessor: relay_state_snapshot::ProcessRelayProofKeys; } diff --git a/cumulus/polkadot-omni-node/lib/src/common/aura.rs b/cumulus/polkadot-omni-node/lib/src/common/aura.rs index 10a631306b33a..b6f156f96dfdb 100644 --- a/cumulus/polkadot-omni-node/lib/src/common/aura.rs +++ b/cumulus/polkadot-omni-node/lib/src/common/aura.rs @@ -18,7 +18,7 @@ use codec::Codec; use cumulus_primitives_aura::AuraUnincludedSegmentApi; -use cumulus_primitives_core::KeyToIncludeInRelayProofApi; +use cumulus_primitives_core::KeyToIncludeInRelayProof; use sp_consensus_aura::AuraApi; use sp_runtime::{ app_crypto::{AppCrypto, AppPair, AppSignature, Pair}, @@ -54,7 +54,7 @@ pub trait AuraRuntimeApi: sp_api::ApiExt + AuraApi::Public> + AuraUnincludedSegmentApi - + KeyToIncludeInRelayProofApi + + KeyToIncludeInRelayProof + Sized { /// Check if the runtime has the Aura API. @@ -68,6 +68,6 @@ impl AuraRuntimeApi for T wher T: sp_api::ApiExt + AuraApi::Public> + AuraUnincludedSegmentApi - + KeyToIncludeInRelayProofApi + + KeyToIncludeInRelayProof { } diff --git a/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs b/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs index ab0a9e1826739..1f6dfa177e96d 100644 --- a/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs +++ b/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs @@ -176,7 +176,7 @@ macro_rules! impl_node_runtime_apis { } } - impl cumulus_primitives_core::KeyToIncludeInRelayProofApi<$block> for $runtime { + impl cumulus_primitives_core::KeyToIncludeInRelayProof<$block> for $runtime { fn keys_to_prove() -> RelayProofRequest { unimplemented!() } diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index 1492ef02b8370..2b110e9cb8b90 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -643,7 +643,7 @@ impl_runtime_apis! 
{ } } - impl cumulus_primitives_core::KeyToIncludeInRelayProofApi for Runtime { + impl cumulus_primitives_core::KeyToIncludeInRelayProof for Runtime { fn keys_to_prove() -> RelayProofRequest { Default::default() } From de2e7c83ae5dde0f442a69b57b51b63bcef4c628 Mon Sep 17 00:00:00 2001 From: metricaez Date: Fri, 19 Dec 2025 16:05:41 -0300 Subject: [PATCH 06/69] feat: collect static keys and unify prove_read --- cumulus/client/parachain-inherent/src/lib.rs | 119 ++++++++----------- 1 file changed, 51 insertions(+), 68 deletions(-) diff --git a/cumulus/client/parachain-inherent/src/lib.rs b/cumulus/client/parachain-inherent/src/lib.rs index 093bc6990245e..6c72e1774b1ca 100644 --- a/cumulus/client/parachain-inherent/src/lib.rs +++ b/cumulus/client/parachain-inherent/src/lib.rs @@ -35,16 +35,15 @@ use sp_storage::ChildInfo; const LOG_TARGET: &str = "parachain-inherent"; -/// Collect the relevant relay chain state in form of a proof for putting it into the validation -/// data inherent. -async fn collect_relay_storage_proof( +/// Builds the list of static relay chain storage keys that are always needed for parachain +/// validation. 
+async fn get_static_relay_storage_keys( relay_chain_interface: &impl RelayChainInterface, para_id: ParaId, relay_parent: PHash, include_authorities: bool, include_next_authorities: bool, - additional_relay_state_keys: Vec>, -) -> Option { +) -> Option>> { use relay_chain::well_known_keys as relay_well_known_keys; let ingress_channels = relay_chain_interface @@ -138,53 +137,49 @@ async fn collect_relay_storage_proof( relevant_keys.push(relay_well_known_keys::NEXT_AUTHORITIES.to_vec()); } - // Add additional relay state keys - let unique_keys: Vec> = additional_relay_state_keys - .into_iter() - .filter(|key| !relevant_keys.contains(key)) - .collect(); - relevant_keys.extend(unique_keys); - - relay_chain_interface - .prove_read(relay_parent, &relevant_keys) - .await - .map_err(|e| { - tracing::error!( - target: LOG_TARGET, - relay_parent = ?relay_parent, - error = ?e, - "Cannot obtain read proof from relay chain.", - ); - }) - .ok() + Some(relevant_keys) } -/// Collect additional storage proofs requested by the runtime. -/// -/// Generates proofs for both top-level relay chain storage and child trie data. -/// Top-level keys are proven directly. Child trie roots are automatically included -/// from their standard storage locations (`:child_storage:default:` + identifier). -/// -/// Returns a merged proof combining all requested data, or `None` if there are no requests. -async fn collect_additional_storage_proofs( +/// Collect the relevant relay chain state in form of a proof for putting it into the validation +/// data inherent. 
+async fn collect_relay_storage_proof( relay_chain_interface: &impl RelayChainInterface, + para_id: ParaId, relay_parent: PHash, + include_authorities: bool, + include_next_authorities: bool, + additional_relay_state_keys: Vec>, relay_proof_request: RelayProofRequest, ) -> Option { - let RelayProofRequest { keys } = relay_proof_request; + // Get static keys that are always needed + let mut all_top_keys = get_static_relay_storage_keys( + relay_chain_interface, + para_id, + relay_parent, + include_authorities, + include_next_authorities, + ) + .await?; - if keys.is_empty() { - return None; - } + // Add additional_relay_state_keys + let unique_keys: Vec> = additional_relay_state_keys + .into_iter() + .filter(|key| !all_top_keys.contains(key)) + .collect(); + all_top_keys.extend(unique_keys); - // Group keys by storage type - let mut top_keys = Vec::new(); + // Group requested keys by storage type + let RelayProofRequest { keys } = relay_proof_request; let mut child_keys: std::collections::BTreeMap, Vec>> = std::collections::BTreeMap::new(); for key in keys { match key { - RelayStorageKey::Top(k) => top_keys.push(k), + RelayStorageKey::Top(k) => { + if !all_top_keys.contains(&k) { + all_top_keys.push(k); + } + }, RelayStorageKey::Child { storage_key, key } => { child_keys.entry(storage_key).or_default().push(key); }, @@ -194,21 +189,20 @@ async fn collect_additional_storage_proofs( // Collect all storage proofs let mut all_proofs = Vec::new(); - // Collect top-level storage proofs - if !top_keys.is_empty() { - match relay_chain_interface.prove_read(relay_parent, &top_keys).await { - Ok(top_proof) => { - all_proofs.push(top_proof); - }, - Err(e) => { - tracing::error!( - target: LOG_TARGET, - relay_parent = ?relay_parent, - error = ?e, - "Cannot obtain top-level storage proof from relay chain.", - ); - }, - } + // Collect top-level storage proof. 
+ match relay_chain_interface.prove_read(relay_parent, &all_top_keys).await { + Ok(top_proof) => { + all_proofs.push(top_proof); + }, + Err(e) => { + tracing::error!( + target: LOG_TARGET, + relay_parent = ?relay_parent, + error = ?e, + "Cannot obtain relay chain storage proof.", + ); + return None; + }, } // Collect child trie proofs @@ -231,11 +225,7 @@ async fn collect_additional_storage_proofs( } // Merge all proofs - if all_proofs.is_empty() { - None - } else { - Some(StorageProof::merge(all_proofs)) - } + Some(StorageProof::merge(all_proofs)) } pub struct ParachainInherentDataProvider; @@ -270,24 +260,17 @@ impl ParachainInherentDataProvider { .iter() .skip(1) .any(sc_consensus_babe::contains_epoch_change::); - let mut relay_chain_state = collect_relay_storage_proof( + let relay_chain_state = collect_relay_storage_proof( relay_chain_interface, para_id, relay_parent, !relay_parent_descendants.is_empty(), include_next_authorities, additional_relay_state_keys, + relay_proof_request, ) .await?; - // Collect additional requested storage proofs (top-level and child tries) - if let Some(additional_proofs) = - collect_additional_storage_proofs(relay_chain_interface, relay_parent, relay_proof_request) - .await - { - relay_chain_state = StorageProof::merge([relay_chain_state, additional_proofs]); - } - let downward_messages = relay_chain_interface .retrieve_dmq_contents(para_id, relay_parent) .await From 1f2a62ef9c8697b3a319012b6ce3fb1bf79d78ac Mon Sep 17 00:00:00 2001 From: metricaez Date: Fri, 19 Dec 2025 23:06:33 -0300 Subject: [PATCH 07/69] feat: move relay proof handler to OnSystemEvent --- cumulus/pallets/aura-ext/src/test.rs | 1 - cumulus/pallets/parachain-system/src/lib.rs | 32 +++++++++++++------ cumulus/pallets/parachain-system/src/mock.rs | 1 - .../src/relay_state_snapshot.rs | 15 --------- cumulus/pallets/solo-to-para/src/lib.rs | 5 +++ cumulus/pallets/xcmp-queue/src/mock.rs | 1 - .../assets/asset-hub-rococo/src/lib.rs | 1 - 
.../assets/asset-hub-westend/src/lib.rs | 1 - .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 1 - .../bridge-hubs/bridge-hub-westend/src/lib.rs | 1 - .../collectives-westend/src/lib.rs | 1 - .../coretime/coretime-westend/src/lib.rs | 1 - .../glutton/glutton-westend/src/lib.rs | 1 - .../runtimes/people/people-westend/src/lib.rs | 1 - .../runtimes/testing/penpal/src/lib.rs | 1 - .../testing/yet-another-parachain/src/lib.rs | 1 - cumulus/test/runtime/src/lib.rs | 1 - 17 files changed, 27 insertions(+), 39 deletions(-) diff --git a/cumulus/pallets/aura-ext/src/test.rs b/cumulus/pallets/aura-ext/src/test.rs index 3486e56a5c2e4..7c4c78ab2a5b0 100644 --- a/cumulus/pallets/aura-ext/src/test.rs +++ b/cumulus/pallets/aura-ext/src/test.rs @@ -151,7 +151,6 @@ impl cumulus_pallet_parachain_system::Config for Test { type CheckAssociatedRelayNumber = AnyRelayNumber; type ConsensusHook = ExpectParentIncluded; type RelayParentOffset = ConstU32<0>; - type RelayProofKeysProcessor = (); } fn set_ancestors() { diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index 1b26dead9bc02..5f12e471e8e98 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -85,7 +85,6 @@ use unincluded_segment::{ }; pub use consensus_hook::{ConsensusHook, ExpectParentIncluded}; -pub use relay_state_snapshot::ProcessRelayProofKeys; /// Register the `validate_block` function that is used by parachains to validate blocks on a /// validator. /// @@ -264,13 +263,6 @@ pub mod pallet { /// /// If set to 0, this config has no impact. type RelayParentOffset: Get; - - /// Processor for relay chain proof keys. - /// - /// This allows parachains to process data from the relay chain state proof, - /// including both child trie keys and main trie keys that were requested - /// via `KeyToIncludeInRelayProof`. 
- type RelayProofKeysProcessor: relay_state_snapshot::ProcessRelayProofKeys; } #[pallet::hooks] @@ -709,7 +701,9 @@ pub mod pallet { >::put(relevant_messaging_state.clone()); >::put(host_config); - total_weight.saturating_accrue(T::RelayProofKeysProcessor::process_relay_proof_keys(&relay_state_proof)); + total_weight.saturating_accrue( + ::on_relay_state_proof(&relay_state_proof), + ); ::on_validation_data(&vfp); @@ -1776,13 +1770,31 @@ impl polkadot_runtime_parachains::EnsureForParachain for Pallet { /// Or like [`on_validation_code_applied`](Self::on_validation_code_applied) that is called /// when the new validation is written to the state. This means that /// from the next block the runtime is being using this new code. -#[impl_trait_for_tuples::impl_for_tuples(30)] pub trait OnSystemEvent { /// Called in each blocks once when the validation data is set by the inherent. fn on_validation_data(data: &PersistedValidationData); /// Called when the validation code is being applied, aka from the next block on this is the new /// runtime. fn on_validation_code_applied(); + /// Called to process keys from the verified relay chain state proof. + fn on_relay_state_proof(relay_state_proof: &relay_state_snapshot::RelayChainStateProof) -> Weight; +} + +#[impl_trait_for_tuples::impl_for_tuples(30)] +impl OnSystemEvent for Tuple { + fn on_validation_data(data: &PersistedValidationData) { + for_tuples!( #( Tuple::on_validation_data(data); )* ); + } + + fn on_validation_code_applied() { + for_tuples!( #( Tuple::on_validation_code_applied(); )* ); + } + + fn on_relay_state_proof(relay_state_proof: &relay_state_snapshot::RelayChainStateProof) -> Weight { + let mut weight = Weight::zero(); + for_tuples!( #( weight = weight.saturating_add(Tuple::on_relay_state_proof(relay_state_proof)); )* ); + weight + } } /// Holds the most recent relay-parent state root and block number of the current parachain block. 
diff --git a/cumulus/pallets/parachain-system/src/mock.rs b/cumulus/pallets/parachain-system/src/mock.rs index b361031be2c37..d3c7cef52b637 100644 --- a/cumulus/pallets/parachain-system/src/mock.rs +++ b/cumulus/pallets/parachain-system/src/mock.rs @@ -99,7 +99,6 @@ impl Config for Test { type ConsensusHook = TestConsensusHook; type WeightInfo = (); type RelayParentOffset = ConstU32<0>; - type RelayProofKeysProcessor = (); } std::thread_local! { diff --git a/cumulus/pallets/parachain-system/src/relay_state_snapshot.rs b/cumulus/pallets/parachain-system/src/relay_state_snapshot.rs index 7c6efb5ddf73e..e9c32997f9760 100644 --- a/cumulus/pallets/parachain-system/src/relay_state_snapshot.rs +++ b/cumulus/pallets/parachain-system/src/relay_state_snapshot.rs @@ -21,26 +21,11 @@ use codec::{Decode, Encode}; use cumulus_primitives_core::{ relay_chain, AbridgedHostConfiguration, AbridgedHrmpChannel, ParaId, }; -use frame_support::weights::Weight; use scale_info::TypeInfo; use sp_runtime::traits::HashingFor; use sp_state_machine::{Backend, TrieBackend, TrieBackendBuilder}; use sp_trie::{HashDBT, MemoryDB, StorageProof, EMPTY_PREFIX}; -/// Process keys from verified relay chain state proofs. -/// -/// This trait allows processing of relay chain storage data from the verified proof. -pub trait ProcessRelayProofKeys { - /// Process keys from a verified relay state proof. - fn process_relay_proof_keys(verified_proof: &RelayChainStateProof) -> Weight; -} - -impl ProcessRelayProofKeys for () { - fn process_relay_proof_keys(_verified_proof: &RelayChainStateProof) -> Weight { - Weight::zero() - } -} - /// The capacity of the upward message queue of a parachain on the relay chain. // The field order should stay the same as the data can be found in the proof to ensure both are // have the same encoded representation. 
diff --git a/cumulus/pallets/solo-to-para/src/lib.rs b/cumulus/pallets/solo-to-para/src/lib.rs index ff68d1b63fe7f..0d6d82cf4590b 100644 --- a/cumulus/pallets/solo-to-para/src/lib.rs +++ b/cumulus/pallets/solo-to-para/src/lib.rs @@ -103,5 +103,10 @@ pub mod pallet { fn on_validation_code_applied() { crate::Pallet::::set_pending_custom_validation_head_data(); } + fn on_relay_state_proof( + _relay_state_proof: ¶chain_system::relay_state_snapshot::RelayChainStateProof, + ) -> frame_support::weights::Weight { + frame_support::weights::Weight::zero() + } } } diff --git a/cumulus/pallets/xcmp-queue/src/mock.rs b/cumulus/pallets/xcmp-queue/src/mock.rs index 1e32c9003a948..3be87221c052e 100644 --- a/cumulus/pallets/xcmp-queue/src/mock.rs +++ b/cumulus/pallets/xcmp-queue/src/mock.rs @@ -106,7 +106,6 @@ impl cumulus_pallet_parachain_system::Config for Test { type CheckAssociatedRelayNumber = AnyRelayNumber; type ConsensusHook = cumulus_pallet_parachain_system::consensus_hook::ExpectParentIncluded; type RelayParentOffset = ConstU32<0>; - type RelayProofKeysProcessor = (); } parameter_types! 
{ diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 720ebe8ee2b4b..69c7a9e544326 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -745,7 +745,6 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32<0>; - type RelayProofKeysProcessor = (); } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index f0cb527c42f09..8b9cbf66015c7 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -903,7 +903,6 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32<0>; - type RelayProofKeysProcessor = (); } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index dac8fc398127b..cfc58a2a4f6eb 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -400,7 +400,6 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32<0>; - type RelayProofKeysProcessor = (); } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< 
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 625c2ebe24507..62532fac5fec3 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -392,7 +392,6 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32<0>; - type RelayProofKeysProcessor = (); } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index 37edc0f02329c..ba70aabd9d0cf 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -423,7 +423,6 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32<0>; - type RelayProofKeysProcessor = (); } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index fb72ae7a59586..ed8748a34933a 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -305,7 +305,6 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32<0>; - type RelayProofKeysProcessor = (); } type ConsensusHook = 
cumulus_pallet_aura_ext::FixedVelocityConsensusHook< diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs index 98131ac4ef58a..eadc4d289fe9d 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs @@ -191,7 +191,6 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type ConsensusHook = ConsensusHook; type WeightInfo = weights::cumulus_pallet_parachain_system::WeightInfo; type RelayParentOffset = ConstU32<0>; - type RelayProofKeysProcessor = (); } parameter_types! { diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index 427ff1454d8fd..dc4a616f02d1c 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -281,7 +281,6 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type ConsensusHook = ConsensusHook; type WeightInfo = weights::cumulus_pallet_parachain_system::WeightInfo; type RelayParentOffset = ConstU32<0>; - type RelayProofKeysProcessor = (); } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs index 4d1ee6a0d2254..bc018b160f778 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs @@ -664,7 +664,6 @@ impl cumulus_pallet_parachain_system::Config for Runtime { >; type RelayParentOffset = ConstU32<0>; - type RelayProofKeysProcessor = (); } impl parachain_info::Config for Runtime {} diff --git a/cumulus/parachains/runtimes/testing/yet-another-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/yet-another-parachain/src/lib.rs index 1c5ead9e7b90b..0f5d2ecd99494 100644 --- 
a/cumulus/parachains/runtimes/testing/yet-another-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/yet-another-parachain/src/lib.rs @@ -368,7 +368,6 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32; - type RelayProofKeysProcessor = (); } impl pallet_message_queue::Config for Runtime { diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index 2b110e9cb8b90..57ea6cab00477 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -385,7 +385,6 @@ impl cumulus_pallet_parachain_system::Config for Runtime { cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32; - type RelayProofKeysProcessor = (); } impl parachain_info::Config for Runtime {} From a384b4c7ca62e2b57f068f2d14395eadc5bf6118 Mon Sep 17 00:00:00 2001 From: metricaez Date: Sat, 20 Dec 2025 12:55:01 -0300 Subject: [PATCH 08/69] feat: prove_child_read for relay rpc interface --- .../relay-chain-rpc-interface/src/lib.rs | 22 +++++++++++-------- .../src/rpc_client.rs | 11 ++++++++++ 2 files changed, 24 insertions(+), 9 deletions(-) diff --git a/cumulus/client/relay-chain-rpc-interface/src/lib.rs b/cumulus/client/relay-chain-rpc-interface/src/lib.rs index 7960444060e38..b88d95e9c1778 100644 --- a/cumulus/client/relay-chain-rpc-interface/src/lib.rs +++ b/cumulus/client/relay-chain-rpc-interface/src/lib.rs @@ -213,16 +213,20 @@ impl RelayChainInterface for RelayChainRpcInterface { async fn prove_child_read( &self, - _relay_parent: RelayHash, - _child_info: &ChildInfo, - _child_keys: &[Vec], + relay_parent: RelayHash, + child_info: &ChildInfo, + child_keys: &[Vec], ) -> RelayChainResult { - // Not implemented: requires relay chain RPC to expose child trie proof method. 
- tracing::warn!( - target: "relay-chain-rpc-interface", - "prove_child_read not implemented for RPC interface, returning empty proof" - ); - Ok(StorageProof::empty()) + let child_storage_key = child_info.prefixed_storage_key(); + let storage_keys: Vec = + child_keys.iter().map(|key| StorageKey(key.clone())).collect(); + + self.rpc_client + .state_get_child_read_proof(child_storage_key, storage_keys, Some(relay_parent)) + .await + .map(|read_proof| { + StorageProof::new(read_proof.proof.into_iter().map(|bytes| bytes.to_vec())) + }) } /// Wait for a given relay chain block diff --git a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs index 80858a665cfaf..52039a4236a58 100644 --- a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs +++ b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs @@ -276,6 +276,17 @@ impl RelayChainRpcClient { self.request("state_getReadProof", params).await } + /// Get child trie read proof for `child_keys` + pub async fn state_get_child_read_proof( + &self, + child_storage_key: sp_core::storage::PrefixedStorageKey, + child_keys: Vec, + at: Option, + ) -> Result, RelayChainError> { + let params = rpc_params![child_storage_key, child_keys, at]; + self.request("state_getChildReadProof", params).await + } + /// Retrieve storage item at `storage_key` pub async fn state_get_storage( &self, From 500ad3d5c41493f7b9583c0c260aff08dbfc29d9 Mon Sep 17 00:00:00 2001 From: metricaez Date: Mon, 22 Dec 2025 00:08:37 -0300 Subject: [PATCH 09/69] feat: imp KeyToIncludeInRelayProof for test pallet --- Cargo.lock | 1 + cumulus/test/runtime/Cargo.toml | 4 ++++ cumulus/test/runtime/src/lib.rs | 13 ++++++++--- cumulus/test/runtime/src/test_pallet.rs | 29 +++++++++++++++++++++++++ 4 files changed, 44 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3ac99dd9da2ab..33bcb88f2b890 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5032,6 +5032,7 @@ dependencies = [ 
"pallet-timestamp", "pallet-transaction-payment", "parity-scale-codec", + "polkadot-primitives", "scale-info", "serde_json", "sp-api", diff --git a/cumulus/test/runtime/Cargo.toml b/cumulus/test/runtime/Cargo.toml index cc8142ff2dedb..703e346582891 100644 --- a/cumulus/test/runtime/Cargo.toml +++ b/cumulus/test/runtime/Cargo.toml @@ -41,6 +41,9 @@ sp-session = { workspace = true } sp-transaction-pool = { workspace = true } sp-version = { workspace = true } +# Polkadot +polkadot-primitives = { workspace = true } + # Cumulus cumulus-pallet-aura-ext = { workspace = true } cumulus-pallet-parachain-system = { workspace = true } @@ -75,6 +78,7 @@ std = [ "pallet-timestamp/std", "pallet-transaction-payment/std", "parachain-info/std", + "polkadot-primitives/std", "scale-info/std", "serde_json/std", "sp-api/std", diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index 57ea6cab00477..e668fc076df84 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -374,7 +374,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type WeightInfo = (); type SelfParaId = parachain_info::Pallet; type RuntimeEvent = RuntimeEvent; - type OnSystemEvent = (); + type OnSystemEvent = TestPallet; type OutboundXcmpMessageSource = (); // Ignore all DMP messages by enqueueing them into `()`: type DmpQueue = frame_support::traits::EnqueueWithOrigin<(), sp_core::ConstU8<0>>; @@ -643,8 +643,15 @@ impl_runtime_apis! { } impl cumulus_primitives_core::KeyToIncludeInRelayProof for Runtime { - fn keys_to_prove() -> RelayProofRequest { - Default::default() + fn keys_to_prove() -> cumulus_primitives_core::RelayProofRequest { + use cumulus_primitives_core::RelayStorageKey; + + RelayProofRequest { + keys: vec![ + // Request a well-known key to verify its inclusion in the relay proof. 
+ RelayStorageKey::Top(test_pallet::RELAY_EPOCH_INDEX_KEY.to_vec()), + ], + } } } } diff --git a/cumulus/test/runtime/src/test_pallet.rs b/cumulus/test/runtime/src/test_pallet.rs index a972198c300d9..25d1d03546b9d 100644 --- a/cumulus/test/runtime/src/test_pallet.rs +++ b/cumulus/test/runtime/src/test_pallet.rs @@ -17,10 +17,15 @@ /// A special pallet that exposes dispatchables that are only useful for testing. pub use pallet::*; +use polkadot_primitives::well_known_keys; + /// Some key that we set in genesis and only read in [`TestOnRuntimeUpgrade`] to ensure that /// [`OnRuntimeUpgrade`] works as expected. pub const TEST_RUNTIME_UPGRADE_KEY: &[u8] = b"+test_runtime_upgrade_key+"; +/// A well-known key to request for inclusion in the proof. +pub use well_known_keys::EPOCH_INDEX as RELAY_EPOCH_INDEX_KEY; + #[frame_support::pallet(dev_mode)] pub mod pallet { use crate::test_pallet::TEST_RUNTIME_UPGRADE_KEY; @@ -121,3 +126,27 @@ pub mod pallet { } } } + +impl cumulus_pallet_parachain_system::OnSystemEvent for Pallet { + fn on_validation_data(_data: &cumulus_primitives_core::PersistedValidationData) { + // Nothing to do here for tests + } + + fn on_validation_code_applied() { + // Nothing to do here for tests + } + + fn on_relay_state_proof( + relay_state_proof: &cumulus_pallet_parachain_system::relay_state_snapshot::RelayChainStateProof, + ) -> frame_support::weights::Weight { + use crate::test_pallet::RELAY_EPOCH_INDEX_KEY; + + // Expect the requested key to be part of the proof. 
relay_state_proof + .read_optional_entry::(RELAY_EPOCH_INDEX_KEY) + .expect("Invalid relay chain state proof") + .expect("EPOCH_INDEX must be present"); + + frame_support::weights::Weight::zero() + } +} From 8f87300450ee8300de34935b171dadb52581ca77 Mon Sep 17 00:00:00 2001 From: metricaez Date: Wed, 24 Dec 2025 08:14:31 -0300 Subject: [PATCH 10/69] chore: fmt and move api warn to debug --- cumulus/client/consensus/aura/src/collators/mod.rs | 2 +- cumulus/pallets/parachain-system/src/lib.rs | 4 ++-- cumulus/primitives/core/src/lib.rs | 1 - 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/mod.rs b/cumulus/client/consensus/aura/src/collators/mod.rs index e22d77c3d1b0f..fba1fa20f12b2 100644 --- a/cumulus/client/consensus/aura/src/collators/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/mod.rs @@ -683,7 +683,7 @@ where .runtime_api() .keys_to_prove(parent_hash) .unwrap_or_else(|e| { - tracing::warn!( + tracing::debug!( target: crate::LOG_TARGET, error = ?e, "Failed to fetch relay proof requests from runtime, using empty request" diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index 5f12e471e8e98..ab62b0b1550cb 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -702,8 +702,8 @@ pub mod pallet { >::put(host_config); total_weight.saturating_accrue( - ::on_relay_state_proof(&relay_state_proof), - ); + ::on_relay_state_proof(&relay_state_proof), + ); ::on_validation_data(&vfp); diff --git a/cumulus/primitives/core/src/lib.rs b/cumulus/primitives/core/src/lib.rs index fddf4ffa8e730..a97d58db7e0a9 100644 --- a/cumulus/primitives/core/src/lib.rs +++ b/cumulus/primitives/core/src/lib.rs @@ -548,7 +548,6 @@ sp_api::decl_runtime_apis! { pub trait KeyToIncludeInRelayProof { /// Returns relay chain storage proof requests.
/// - /// The collator will include them in the relay chain proof that is passed alongside the parachain inherent into the runtime. fn keys_to_prove() -> RelayProofRequest; } From cbc8986bef21074f468951395eeb4e9dcdf69842 Mon Sep 17 00:00:00 2001 From: metricaez Date: Wed, 24 Dec 2025 14:07:02 -0300 Subject: [PATCH 11/69] feat: test pallet check with Alice balance --- Cargo.lock | 2 ++ cumulus/test/relay-sproof-builder/Cargo.toml | 4 +++ cumulus/test/relay-sproof-builder/src/lib.rs | 18 +++++++++++++ cumulus/test/runtime/src/lib.rs | 4 +-- cumulus/test/runtime/src/test_pallet.rs | 28 +++++++++++++++----- 5 files changed, 47 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 33bcb88f2b890..57ced506acf03 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5004,6 +5004,8 @@ dependencies = [ "proptest", "sp-consensus-babe", "sp-core 28.0.0", + "sp-io", + "sp-keyring", "sp-runtime", "sp-state-machine", "sp-trie", diff --git a/cumulus/test/relay-sproof-builder/Cargo.toml b/cumulus/test/relay-sproof-builder/Cargo.toml index 2ae2d9bf00034..a7ea2472bb290 100644 --- a/cumulus/test/relay-sproof-builder/Cargo.toml +++ b/cumulus/test/relay-sproof-builder/Cargo.toml @@ -17,6 +17,8 @@ codec = { features = ["derive"], workspace = true } # Substrate sp-consensus-babe = { workspace = true } sp-core = { workspace = true } +sp-io = { workspace = true } +sp-keyring = { workspace = true } sp-runtime = { workspace = true } sp-state-machine = { workspace = true } sp-trie = { workspace = true } @@ -38,6 +40,8 @@ std = [ "polkadot-primitives/std", "sp-consensus-babe/std", "sp-core/std", + "sp-io/std", + "sp-keyring/std", "sp-runtime/std", "sp-state-machine/std", "sp-trie/std", diff --git a/cumulus/test/relay-sproof-builder/src/lib.rs b/cumulus/test/relay-sproof-builder/src/lib.rs index ee8cd92fd9d93..0c19be49cfac8 100644 --- a/cumulus/test/relay-sproof-builder/src/lib.rs +++ b/cumulus/test/relay-sproof-builder/src/lib.rs @@ -247,6 +247,9 @@ impl RelayStateSproofBuilder { ); 
insert(relay_chain::well_known_keys::CURRENT_SLOT.to_vec(), self.current_slot.encode()); + let (alice_key, alice_data) = generate_alice_account(); + insert(alice_key, alice_data); + for (key, value) in self.additional_key_values { insert(key, value); } @@ -273,6 +276,21 @@ fn convert_to_authority_weight_pair( .collect() } +/// Include to avoid `KeyToIncludeInRelayProof` test on test-pallet to break unit tests. +fn generate_alice_account() -> (Vec, Vec) { + use codec::Encode; + use sp_keyring::Sr25519Keyring; + + let alice = Sr25519Keyring::Alice.to_account_id(); + let mut alice_key = sp_io::hashing::twox_128(b"System").to_vec(); + alice_key.extend_from_slice(&sp_io::hashing::twox_128(b"Account")); + alice_key.extend_from_slice(&sp_io::hashing::blake2_128(&alice.encode())); + alice_key.extend_from_slice(&alice.encode()); + + let alice_account = (0u32, 0u32, 1u32, 0u32, 25_000_000_000_000u128, 0u128, 0u128, 0u128); + (alice_key, alice_account.encode()) +} + /// Add a BABE pre-digest to a generic header fn add_babe_pre_digest(header: &mut Header, authority_index: u32, block_number: u64) { /// This method generates some vrf data, but only to make the compiler happy diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index e668fc076df84..ea000d975e753 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -648,8 +648,8 @@ impl_runtime_apis! { RelayProofRequest { keys: vec![ - // Request a well-known key to verify its inclusion in the relay proof. - RelayStorageKey::Top(test_pallet::RELAY_EPOCH_INDEX_KEY.to_vec()), + // Request a key to verify its inclusion in the proof. 
+ RelayStorageKey::Top(test_pallet::relay_alice_account_key()), ], } } diff --git a/cumulus/test/runtime/src/test_pallet.rs b/cumulus/test/runtime/src/test_pallet.rs index 25d1d03546b9d..7f5d6b8cbd0ab 100644 --- a/cumulus/test/runtime/src/test_pallet.rs +++ b/cumulus/test/runtime/src/test_pallet.rs @@ -17,14 +17,24 @@ /// A special pallet that exposes dispatchables that are only useful for testing. pub use pallet::*; -use polkadot_primitives::well_known_keys; +use codec::Encode; /// Some key that we set in genesis and only read in [`TestOnRuntimeUpgrade`] to ensure that /// [`OnRuntimeUpgrade`] works as expected. pub const TEST_RUNTIME_UPGRADE_KEY: &[u8] = b"+test_runtime_upgrade_key+"; -/// A well-known key to request for inclusion in the proof. -pub use well_known_keys::EPOCH_INDEX as RELAY_EPOCH_INDEX_KEY; +/// Generates the storage key for Alice's account on the relay chain. +pub fn relay_alice_account_key() -> alloc::vec::Vec { + use sp_keyring::Sr25519Keyring; + + let alice = Sr25519Keyring::Alice.to_account_id(); + + let mut key = sp_io::hashing::twox_128(b"System").to_vec(); + key.extend_from_slice(&sp_io::hashing::twox_128(b"Account")); + key.extend_from_slice(&sp_io::hashing::blake2_128(&alice.encode())); + key.extend_from_slice(&alice.encode()); + key +} #[frame_support::pallet(dev_mode)] pub mod pallet { @@ -139,13 +149,17 @@ impl cumulus_pallet_parachain_system::OnSystemEvent for Pallet { fn on_relay_state_proof( relay_state_proof: &cumulus_pallet_parachain_system::relay_state_snapshot::RelayChainStateProof, ) -> frame_support::weights::Weight { - use crate::test_pallet::RELAY_EPOCH_INDEX_KEY; + use crate::{Balance, Nonce}; + use frame_system::AccountInfo; + use pallet_balances::AccountData; + + let alice_key = crate::test_pallet::relay_alice_account_key(); - // Expect the requested key to be part of the proof. + // Verify that Alice's account is included in the relay proof. 
relay_state_proof - .read_optional_entry::(RELAY_EPOCH_INDEX_KEY) + .read_optional_entry::>>(&alice_key) .expect("Invalid relay chain state proof") - .expect("EPOCH_INDEX must be present"); + .expect("Alice's account must be present in the relay proof"); frame_support::weights::Weight::zero() } From 6e6c669b659afb653c4cbf1c20ddc8b362154d17 Mon Sep 17 00:00:00 2001 From: metricaez Date: Tue, 9 Dec 2025 15:48:37 -0300 Subject: [PATCH 12/69] feat: xcm Publish instruction and executor integration --- .../runtime/test-runtime/src/xcm_config.rs | 1 + .../src/fungible/mock.rs | 1 + .../src/generic/benchmarking.rs | 36 +++++ .../pallet-xcm-benchmarks/src/generic/mock.rs | 4 + .../pallet-xcm-benchmarks/src/generic/mod.rs | 8 + polkadot/xcm/pallet-xcm/src/errors.rs | 4 + polkadot/xcm/src/v4/mod.rs | 3 +- polkadot/xcm/src/v5/mod.rs | 31 +++- polkadot/xcm/src/v5/traits.rs | 3 + .../single_asset_adapter/mock.rs | 1 + polkadot/xcm/xcm-builder/src/test_utils.rs | 32 +++- polkadot/xcm/xcm-builder/src/tests/mock.rs | 1 + polkadot/xcm/xcm-builder/src/tests/mod.rs | 1 + .../xcm/xcm-builder/src/tests/pay/mock.rs | 1 + polkadot/xcm/xcm-builder/src/tests/publish.rs | 153 ++++++++++++++++++ polkadot/xcm/xcm-executor/src/config.rs | 10 +- polkadot/xcm/xcm-executor/src/lib.rs | 15 +- polkadot/xcm/xcm-executor/src/tests/mock.rs | 1 + .../src/traits/broadcast_handler.rs | 34 ++++ polkadot/xcm/xcm-executor/src/traits/mod.rs | 2 + 20 files changed, 329 insertions(+), 13 deletions(-) create mode 100644 polkadot/xcm/xcm-builder/src/tests/publish.rs create mode 100644 polkadot/xcm/xcm-executor/src/traits/broadcast_handler.rs diff --git a/polkadot/runtime/test-runtime/src/xcm_config.rs b/polkadot/runtime/test-runtime/src/xcm_config.rs index 8d7e351d0d5be..4b43918733c91 100644 --- a/polkadot/runtime/test-runtime/src/xcm_config.rs +++ b/polkadot/runtime/test-runtime/src/xcm_config.rs @@ -158,6 +158,7 @@ impl xcm_executor::Config for XcmConfig { type HrmpChannelAcceptedHandler = (); type 
HrmpChannelClosingHandler = (); type XcmRecorder = (); + type BroadcastHandler = (); } impl pallet_xcm::Config for crate::Runtime { diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs index 9e06550b6b724..dc4c14e2f6432 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs @@ -122,6 +122,7 @@ impl xcm_executor::Config for XcmConfig { type HrmpChannelAcceptedHandler = (); type HrmpChannelClosingHandler = (); type XcmRecorder = (); + type BroadcastHandler = (); } impl crate::Config for Test { diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs index aefbada7429dd..199543f4efa76 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs @@ -961,6 +961,42 @@ mod benchmarks { Ok(()) } + #[benchmark] + fn publish(n: Linear<1, { MaxPublishItems::get() }>) -> Result<(), BenchmarkError> { + use xcm::latest::{MaxPublishKeyLength, MaxPublishValueLength}; + + // The `Publish` instruction weight scales with the number of items published. + // Each item is benchmarked at maximum key and value lengths to represent worst-case + // storage operations. The actual weight formula will be `base_weight + n * per_item_weight`. 
+ let max_key_len = MaxPublishKeyLength::get() as usize; + let max_value_len = MaxPublishValueLength::get() as usize; + + // Create publish data: n items, each with maximum key and value length + let data_vec: Vec<_> = (0..n) + .map(|i| { + ( + BoundedVec::try_from(vec![i as u8; max_key_len]).unwrap(), + BoundedVec::try_from(vec![i as u8; max_value_len]).unwrap(), + ) + }) + .collect(); + + let data = BoundedVec::try_from(data_vec).unwrap(); + + let origin = T::publish_origin()?; + let mut executor = new_executor::(origin); + + let instruction = Instruction::Publish { data }; + let xcm = Xcm(vec![instruction]); + + #[block] + { + executor.bench_process(xcm)?; + } + + Ok(()) + } + impl_benchmark_test_suite!( Pallet, crate::generic::mock::new_test_ext(), diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs index 6368ca0e9c3f5..548baeb13cdb2 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs @@ -193,6 +193,10 @@ impl generic::Config for Test { let target: Location = AccountId32 { network: None, id: [0; 32] }.into(); Ok((origin, target)) } + + fn publish_origin() -> Result { + Ok(Parachain(1000).into()) + } } #[cfg(feature = "runtime-benchmarks")] diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mod.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mod.rs index d7471b02368fa..5bfab2e7edc14 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mod.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mod.rs @@ -108,6 +108,14 @@ pub mod pallet { crate_version: as frame_support::traits::PalletInfoAccess>::crate_version(), } } + + /// Return a valid origin for `Publish` benchmark. + /// + /// Should return a parachain origin that is allowed by the BroadcastHandler filter. + /// If set to `Err`, benchmarks which rely on publish will be skipped. 
+ fn publish_origin() -> Result { + Err(BenchmarkError::Skip) + } } #[pallet::pallet] diff --git a/polkadot/xcm/pallet-xcm/src/errors.rs b/polkadot/xcm/pallet-xcm/src/errors.rs index 84e544c08748e..6dccb583b9f67 100644 --- a/polkadot/xcm/pallet-xcm/src/errors.rs +++ b/polkadot/xcm/pallet-xcm/src/errors.rs @@ -136,6 +136,9 @@ pub enum ExecutionError { /// Too many assets matched the given asset filter. #[codec(index = 35)] TooManyAssets, + /// Publishing data failed. + #[codec(index = 36)] + PublishFailed, // Errors that happen prior to instructions being executed. These fall outside of the XCM // spec. /// XCM version not able to be handled. @@ -198,6 +201,7 @@ impl From for ExecutionError { XcmError::Unanchored => Self::Unanchored, XcmError::NotDepositable => Self::NotDepositable, XcmError::TooManyAssets => Self::TooManyAssets, + XcmError::PublishFailed => Self::PublishFailed, XcmError::UnhandledXcmVersion => Self::UnhandledXcmVersion, XcmError::WeightLimitReached(_) => Self::WeightLimitReached, XcmError::Barrier => Self::Barrier, diff --git a/polkadot/xcm/src/v4/mod.rs b/polkadot/xcm/src/v4/mod.rs index 502200e849405..215cc710b5644 100644 --- a/polkadot/xcm/src/v4/mod.rs +++ b/polkadot/xcm/src/v4/mod.rs @@ -1427,7 +1427,8 @@ impl TryFrom> for Instructi InitiateTransfer { .. } | PayFees { .. } | SetHints { .. } | - ExecuteWithOrigin { .. } => { + ExecuteWithOrigin { .. } | + Publish { .. 
} => { tracing::debug!(target: "xcm::versions::v5tov4", ?new_instruction, "not supported by v4"); return Err(()); }, diff --git a/polkadot/xcm/src/v5/mod.rs b/polkadot/xcm/src/v5/mod.rs index 0caf7d0c581fe..d7f6e6984e1bf 100644 --- a/polkadot/xcm/src/v5/mod.rs +++ b/polkadot/xcm/src/v5/mod.rs @@ -186,8 +186,9 @@ pub mod prelude { InstructionError, InstructionIndex, InteriorLocation, Junction::{self, *}, Junctions::{self, Here}, - Location, MaxAssetTransferFilters, MaybeErrorCode, + Location, MaxAssetTransferFilters, MaxPublishItems, MaybeErrorCode, NetworkId::{self, *}, + PublishData, OriginKind, Outcome, PalletInfo, Parent, ParentThen, PreparedMessage, QueryId, QueryResponseInfo, Reanchorable, Response, Result as XcmResult, SendError, SendResult, SendXcm, Weight, @@ -211,8 +212,16 @@ parameter_types! { pub MaxPalletNameLen: u32 = 48; pub MaxPalletsInfo: u32 = 64; pub MaxAssetTransferFilters: u32 = 6; + pub MaxPublishItems: u32 = 16; + pub MaxPublishKeyLength: u32 = 32; + pub MaxPublishValueLength: u32 = 1024; } +pub type PublishData = BoundedVec< + (BoundedVec, BoundedVec), + MaxPublishItems, +>; + #[derive( Clone, Eq, PartialEq, Encode, Decode, DecodeWithMemTracking, Debug, TypeInfo, MaxEncodedLen, )] @@ -1139,6 +1148,24 @@ pub enum Instruction { /// - `hints`: A bounded vector of `ExecutionHint`, specifying the different hints that will /// be activated. SetHints { hints: BoundedVec }, + + /// Publish data to the relay chain for other parachains to access. + /// + /// This instruction allows parachains to publish key-value data pairs to the relay chain + /// which are stored in child tries on the relay chain indexed by the publisher's ParaId. + /// + /// - `data`: The key-value pairs to be published, bounded by MaxPublishItems + /// + /// Safety: Origin must be a parachain (Sovereign Account). The relay chain will validate + /// the origin and store data in the appropriate child trie. 
+ /// + /// Kind: *Command* + /// + /// Errors: + /// - NoPermission: If origin is not authorized by the configured filter + /// - BadOrigin: If origin is not a valid parachain + /// - PublishFailed: If the underlying handler fails (e.g., key/value too long, too many items) + Publish { data: PublishData }, } #[derive( @@ -1241,6 +1268,7 @@ impl Instruction { InitiateTransfer { destination, remote_fees, preserve_origin, assets, remote_xcm }, ExecuteWithOrigin { descendant_origin, xcm } => ExecuteWithOrigin { descendant_origin, xcm: xcm.into() }, + Publish { data } => Publish { data }, } } } @@ -1316,6 +1344,7 @@ impl> GetWeight for Instruction { W::initiate_transfer(destination, remote_fees, preserve_origin, assets, remote_xcm), ExecuteWithOrigin { descendant_origin, xcm } => W::execute_with_origin(descendant_origin, xcm), + Publish { data } => W::publish(data), } } } diff --git a/polkadot/xcm/src/v5/traits.rs b/polkadot/xcm/src/v5/traits.rs index ecbf46f84d31b..a157c176fa587 100644 --- a/polkadot/xcm/src/v5/traits.rs +++ b/polkadot/xcm/src/v5/traits.rs @@ -154,6 +154,9 @@ pub enum Error { /// Too many assets matched the given asset filter. #[codec(index = 35)] TooManyAssets, + /// Publishing data failed. + #[codec(index = 36)] + PublishFailed, // Errors that happen prior to instructions being executed. These fall outside of the XCM // spec. 
diff --git a/polkadot/xcm/xcm-builder/src/asset_exchange/single_asset_adapter/mock.rs b/polkadot/xcm/xcm-builder/src/asset_exchange/single_asset_adapter/mock.rs index 30136b004a480..2eaae0dcee4a2 100644 --- a/polkadot/xcm/xcm-builder/src/asset_exchange/single_asset_adapter/mock.rs +++ b/polkadot/xcm/xcm-builder/src/asset_exchange/single_asset_adapter/mock.rs @@ -250,6 +250,7 @@ impl xcm_executor::Config for XcmConfig { type HrmpNewChannelOpenRequestHandler = (); type HrmpChannelAcceptedHandler = (); type HrmpChannelClosingHandler = (); + type BroadcastHandler = (); type XcmRecorder = (); } diff --git a/polkadot/xcm/xcm-builder/src/test_utils.rs b/polkadot/xcm/xcm-builder/src/test_utils.rs index 90afb2c9a3d3e..26e6938a27c05 100644 --- a/polkadot/xcm/xcm-builder/src/test_utils.rs +++ b/polkadot/xcm/xcm-builder/src/test_utils.rs @@ -16,13 +16,13 @@ // Shared test utilities and implementations for the XCM Builder. -use alloc::vec::Vec; +use alloc::{collections::BTreeMap, vec::Vec}; use frame_support::{ parameter_types, traits::{Contains, CrateVersion, PalletInfoData, PalletsInfoAccess}, }; pub use xcm::latest::{prelude::*, Weight}; -use xcm_executor::traits::{ClaimAssets, DropAssets, VersionChangeNotifier}; +use xcm_executor::traits::{BroadcastHandler, ClaimAssets, DropAssets, VersionChangeNotifier}; pub use xcm_executor::{ traits::{ AssetExchange, AssetLock, ConvertOrigin, Enact, LockError, OnResponse, TransactAsset, @@ -33,6 +33,8 @@ pub use xcm_executor::{ parameter_types! 
{ pub static SubscriptionRequests: Vec<(Location, Option<(QueryId, Weight)>)> = vec![]; pub static MaxAssetsIntoHolding: u32 = 4; + // Maps ParaId => Vec<(key, value)> + pub static PublishedData: BTreeMap, Vec)>> = BTreeMap::new(); } pub struct TestSubscriptionService; @@ -62,6 +64,32 @@ impl VersionChangeNotifier for TestSubscriptionService { } } +pub struct TestBroadcastHandler; + +impl BroadcastHandler for TestBroadcastHandler { + fn handle_publish(origin: &Location, data: PublishData) -> XcmResult { + // Extract para_id from origin + let para_id = match origin.unpack() { + (0, [Parachain(id)]) => *id, + (1, [Parachain(id), ..]) => *id, + _ => return Err(XcmError::BadOrigin), + }; + + let mut published = PublishedData::get(); + let data_vec: Vec<(Vec, Vec)> = data + .into_inner() + .into_iter() + .map(|(k, v)| (k.into_inner(), v.into_inner())) + .collect(); + + // Merge with existing data for this parachain + published.entry(para_id).or_insert_with(Vec::new).extend(data_vec); + PublishedData::set(published); + + Ok(()) + } +} + parameter_types! 
{ pub static TrappedAssets: Vec<(Location, Assets)> = vec![]; } diff --git a/polkadot/xcm/xcm-builder/src/tests/mock.rs b/polkadot/xcm/xcm-builder/src/tests/mock.rs index b932aaee6fcf8..75758cca65690 100644 --- a/polkadot/xcm/xcm-builder/src/tests/mock.rs +++ b/polkadot/xcm/xcm-builder/src/tests/mock.rs @@ -771,6 +771,7 @@ impl Config for TestConfig { type HrmpChannelAcceptedHandler = (); type HrmpChannelClosingHandler = (); type XcmRecorder = (); + type BroadcastHandler = TestBroadcastHandler; } pub fn fungible_multi_asset(location: Location, amount: u128) -> Asset { diff --git a/polkadot/xcm/xcm-builder/src/tests/mod.rs b/polkadot/xcm/xcm-builder/src/tests/mod.rs index 379baaf5e3767..f314d41d9ec65 100644 --- a/polkadot/xcm/xcm-builder/src/tests/mod.rs +++ b/polkadot/xcm/xcm-builder/src/tests/mod.rs @@ -35,6 +35,7 @@ mod expecting; mod locking; mod origins; mod pay; +mod publish; mod querying; mod routing; mod transacting; diff --git a/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs b/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs index d8f8e15f5eb05..6e231a5baf2d6 100644 --- a/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs +++ b/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs @@ -246,6 +246,7 @@ impl xcm_executor::Config for XcmConfig { type SafeCallFilter = Everything; type Aliasers = Nothing; type TransactionalProcessor = (); + type BroadcastHandler = (); type HrmpNewChannelOpenRequestHandler = (); type HrmpChannelAcceptedHandler = (); type HrmpChannelClosingHandler = (); diff --git a/polkadot/xcm/xcm-builder/src/tests/publish.rs b/polkadot/xcm/xcm-builder/src/tests/publish.rs new file mode 100644 index 0000000000000..6ac7369c555e8 --- /dev/null +++ b/polkadot/xcm/xcm-builder/src/tests/publish.rs @@ -0,0 +1,153 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. 
+ +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Tests for Publish XCM instruction. + +use super::*; +use crate::test_utils::PublishedData; +use sp_runtime::BoundedVec; +use xcm::latest::{MaxPublishKeyLength, MaxPublishValueLength}; + +// Helper to create test publish data +fn test_publish_data(items: Vec<(&[u8], &[u8])>) -> PublishData { + items + .into_iter() + .map(|(k, v)| { + ( + BoundedVec::::try_from(k.to_vec()).unwrap(), + BoundedVec::::try_from(v.to_vec()).unwrap(), + ) + }) + .collect::>() + .try_into() + .unwrap() +} + +#[test] +fn publish_from_parachain_works() { + // Allow unpaid execution from Parachain(1000) + AllowUnpaidFrom::set(vec![Parachain(1000).into()]); + + let data = test_publish_data(vec![(b"key1", b"value1")]); + + let message = Xcm::(vec![Publish { data: data.clone() }]); + let mut hash = fake_message_hash(&message); + let weight_limit = Weight::from_parts(10, 10); + + let r = XcmExecutor::::prepare_and_execute( + Parachain(1000), + message, + &mut hash, + weight_limit, + Weight::zero(), + ); + + assert_eq!(r, Outcome::Complete { used: Weight::from_parts(10, 10) }); + + // Verify data was published + let published = PublishedData::get(); + assert_eq!(published.get(&1000).unwrap().len(), 1); + assert_eq!(published.get(&1000).unwrap()[0], (b"key1".to_vec(), b"value1".to_vec())); +} + +#[test] +fn publish_from_non_parachain_fails() { + // Allow unpaid execution from Parent to test 
that origin validation happens + AllowUnpaidFrom::set(vec![Parent.into()]); + + let data = test_publish_data(vec![(b"key1", b"value1")]); + + let message = Xcm::(vec![Publish { data }]); + let mut hash = fake_message_hash(&message); + let weight_limit = Weight::from_parts(10, 10); + + // Try from Parent (not a parachain) + let r = XcmExecutor::::prepare_and_execute( + Parent, + message, + &mut hash, + weight_limit, + Weight::zero(), + ); + + assert_eq!( + r, + Outcome::Incomplete { + used: Weight::from_parts(10, 10), + error: InstructionError { index: 0, error: XcmError::BadOrigin }, + } + ); +} + +#[test] +fn publish_without_origin_fails() { + // Allow unpaid execution from Parachain(1000) + AllowUnpaidFrom::set(vec![Parachain(1000).into()]); + + let data = test_publish_data(vec![(b"key1", b"value1")]); + + let message = Xcm::(vec![ClearOrigin, Publish { data }]); + let mut hash = fake_message_hash(&message); + let weight_limit = Weight::from_parts(20, 20); + + let r = XcmExecutor::::prepare_and_execute( + Parachain(1000), + message, + &mut hash, + weight_limit, + Weight::zero(), + ); + + assert_eq!( + r, + Outcome::Incomplete { + used: Weight::from_parts(20, 20), + error: InstructionError { index: 1, error: XcmError::BadOrigin }, + } + ); +} + +#[test] +fn publish_multiple_items_works() { + // Allow unpaid execution from Parachain(1000) + AllowUnpaidFrom::set(vec![Parachain(1000).into()]); + + let data = test_publish_data(vec![ + (b"key1", b"value1"), + (b"key2", b"value2"), + ]); + + let message = Xcm::(vec![Publish { data: data.clone() }]); + let mut hash = fake_message_hash(&message); + let weight_limit = Weight::from_parts(10, 10); + + let r = XcmExecutor::::prepare_and_execute( + Parachain(1000), + message, + &mut hash, + weight_limit, + Weight::zero(), + ); + + assert_eq!(r, Outcome::Complete { used: Weight::from_parts(10, 10) }); + + // Verify all data was published + let published = PublishedData::get(); + let para_data = published.get(&1000).unwrap(); + 
assert_eq!(para_data.len(), 2); + assert!(para_data.contains(&(b"key1".to_vec(), b"value1".to_vec()))); + assert!(para_data.contains(&(b"key2".to_vec(), b"value2".to_vec()))); +} diff --git a/polkadot/xcm/xcm-executor/src/config.rs b/polkadot/xcm/xcm-executor/src/config.rs index 60a5ed63f32ee..a3cc889c8edeb 100644 --- a/polkadot/xcm/xcm-executor/src/config.rs +++ b/polkadot/xcm/xcm-executor/src/config.rs @@ -15,10 +15,10 @@ // along with Polkadot. If not, see . use crate::traits::{ - AssetExchange, AssetLock, CallDispatcher, ClaimAssets, ConvertOrigin, DropAssets, EventEmitter, - ExportXcm, FeeManager, HandleHrmpChannelAccepted, HandleHrmpChannelClosing, - HandleHrmpNewChannelOpenRequest, OnResponse, ProcessTransaction, RecordXcm, ShouldExecute, - TransactAsset, VersionChangeNotifier, WeightBounds, WeightTrader, + AssetExchange, AssetLock, BroadcastHandler, CallDispatcher, ClaimAssets, ConvertOrigin, + DropAssets, EventEmitter, ExportXcm, FeeManager, HandleHrmpChannelAccepted, + HandleHrmpChannelClosing, HandleHrmpNewChannelOpenRequest, OnResponse, ProcessTransaction, + RecordXcm, ShouldExecute, TransactAsset, VersionChangeNotifier, WeightBounds, WeightTrader, }; use frame_support::{ dispatch::{GetDispatchInfo, Parameter, PostDispatchInfo}, @@ -134,4 +134,6 @@ pub trait Config { type HrmpChannelClosingHandler: HandleHrmpChannelClosing; /// Allows recording the last executed XCM (used by dry-run runtime APIs). type XcmRecorder: RecordXcm; + /// Handler for publish operations on the relay chain. 
+ type BroadcastHandler: BroadcastHandler; } diff --git a/polkadot/xcm/xcm-executor/src/lib.rs b/polkadot/xcm/xcm-executor/src/lib.rs index 1c569225ce2b6..3e8778e095038 100644 --- a/polkadot/xcm/xcm-executor/src/lib.rs +++ b/polkadot/xcm/xcm-executor/src/lib.rs @@ -33,11 +33,11 @@ use xcm::latest::{prelude::*, AssetTransferFilter}; pub mod traits; use traits::{ - validate_export, AssetExchange, AssetLock, CallDispatcher, ClaimAssets, ConvertOrigin, - DropAssets, Enact, EventEmitter, ExportXcm, FeeManager, FeeReason, HandleHrmpChannelAccepted, - HandleHrmpChannelClosing, HandleHrmpNewChannelOpenRequest, OnResponse, ProcessTransaction, - Properties, ShouldExecute, TransactAsset, VersionChangeNotifier, WeightBounds, WeightTrader, - XcmAssetTransfers, + validate_export, AssetExchange, AssetLock, BroadcastHandler, CallDispatcher, ClaimAssets, + ConvertOrigin, DropAssets, Enact, EventEmitter, ExportXcm, FeeManager, FeeReason, + HandleHrmpChannelAccepted, HandleHrmpChannelClosing, HandleHrmpNewChannelOpenRequest, + OnResponse, ProcessTransaction, Properties, ShouldExecute, TransactAsset, + VersionChangeNotifier, WeightBounds, WeightTrader, XcmAssetTransfers, }; pub use traits::RecordXcm; @@ -1819,6 +1819,11 @@ impl XcmExecutor { Config::TransactionalProcessor::process(|| { Config::HrmpChannelClosingHandler::handle(initiator, sender, recipient) }), + Publish { data } => { + let origin = self.origin_ref().ok_or(XcmError::BadOrigin)?; + Config::BroadcastHandler::handle_publish(origin, data)?; + Ok(()) + }, } } diff --git a/polkadot/xcm/xcm-executor/src/tests/mock.rs b/polkadot/xcm/xcm-executor/src/tests/mock.rs index c8fb6dd9ebf39..e0521b2a3d940 100644 --- a/polkadot/xcm/xcm-executor/src/tests/mock.rs +++ b/polkadot/xcm/xcm-executor/src/tests/mock.rs @@ -328,4 +328,5 @@ impl Config for XcmConfig { type HrmpChannelAcceptedHandler = (); type HrmpChannelClosingHandler = (); type XcmRecorder = (); + type BroadcastHandler = (); } diff --git 
a/polkadot/xcm/xcm-executor/src/traits/broadcast_handler.rs b/polkadot/xcm/xcm-executor/src/traits/broadcast_handler.rs new file mode 100644 index 0000000000000..0a2aab7c7ae0e --- /dev/null +++ b/polkadot/xcm/xcm-executor/src/traits/broadcast_handler.rs @@ -0,0 +1,34 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Traits for handling publish operations in XCM. + +use xcm::latest::{Location, PublishData, Result as XcmResult}; + +/// Trait for handling publish operations on the relay chain. +pub trait BroadcastHandler { + /// Handle publish operation from the given origin. + /// Should validate origin authorization and extract necessary data. + fn handle_publish(origin: &Location, data: PublishData) -> XcmResult; +} + +/// Implementation of `BroadcastHandler` for the unit type `()`. 
+impl BroadcastHandler for () { + fn handle_publish(_origin: &Location, _data: PublishData) -> XcmResult { + // No-op implementation for unit type + Ok(()) + } +} diff --git a/polkadot/xcm/xcm-executor/src/traits/mod.rs b/polkadot/xcm/xcm-executor/src/traits/mod.rs index 038de83e3fa37..af034e1918859 100644 --- a/polkadot/xcm/xcm-executor/src/traits/mod.rs +++ b/polkadot/xcm/xcm-executor/src/traits/mod.rs @@ -50,6 +50,8 @@ mod hrmp; pub use hrmp::{ HandleHrmpChannelAccepted, HandleHrmpChannelClosing, HandleHrmpNewChannelOpenRequest, }; +mod broadcast_handler; +pub use broadcast_handler::BroadcastHandler; mod event_emitter; mod record_xcm; mod weight; From 522c7c6d33e3a2623882b1c79b26bef7a01269f6 Mon Sep 17 00:00:00 2001 From: metricaez Date: Wed, 10 Dec 2025 09:59:54 -0300 Subject: [PATCH 13/69] feat: unit for runtimes --- .../runtimes/assets/asset-hub-rococo/src/weights/xcm/mod.rs | 6 ++++++ .../runtimes/assets/asset-hub-rococo/src/xcm_config.rs | 1 + .../assets/asset-hub-westend/src/weights/xcm/mod.rs | 6 ++++++ .../runtimes/assets/asset-hub-westend/src/xcm_config.rs | 1 + .../bridge-hubs/bridge-hub-rococo/src/weights/xcm/mod.rs | 6 ++++++ .../bridge-hubs/bridge-hub-rococo/src/xcm_config.rs | 1 + .../bridge-hubs/bridge-hub-westend/src/weights/xcm/mod.rs | 6 ++++++ .../bridge-hubs/bridge-hub-westend/src/xcm_config.rs | 1 + .../collectives/collectives-westend/src/weights/xcm/mod.rs | 6 ++++++ .../collectives/collectives-westend/src/xcm_config.rs | 1 + .../coretime/coretime-westend/src/weights/xcm/mod.rs | 6 ++++++ .../runtimes/coretime/coretime-westend/src/xcm_config.rs | 1 + .../runtimes/glutton/glutton-westend/src/xcm_config.rs | 1 + .../runtimes/people/people-westend/src/weights/xcm/mod.rs | 6 ++++++ .../runtimes/people/people-westend/src/xcm_config.rs | 1 + .../parachains/runtimes/testing/penpal/src/xcm_config.rs | 1 + .../testing/yet-another-parachain/src/xcm_config.rs | 1 + polkadot/runtime/rococo/src/weights/xcm/mod.rs | 6 ++++++ 
polkadot/runtime/rococo/src/xcm_config.rs | 1 + polkadot/runtime/westend/src/weights/xcm/mod.rs | 6 ++++++ polkadot/runtime/westend/src/xcm_config.rs | 1 + 21 files changed, 66 insertions(+) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/mod.rs index 3dc3e82a62ff9..caed2e572535b 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/mod.rs @@ -271,4 +271,10 @@ impl XcmWeightInfo for AssetHubRococoXcmWeight { fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { XcmGeneric::::execute_with_origin() } + fn publish(data: &PublishData) -> Weight { + // TODO: Generate proper weights via benchmarking + // For now, use a conservative estimate: base weight + per-item weight + Weight::from_parts(10_000_000, 0) + .saturating_add(Weight::from_parts(1_000_000, 0).saturating_mul(data.len() as u64)) + } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs index 66ffddf5c8339..5fc891d3c14be 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs @@ -416,6 +416,7 @@ impl xcm_executor::Config for XcmConfig { type HrmpChannelAcceptedHandler = (); type HrmpChannelClosingHandler = (); type XcmRecorder = PolkadotXcm; + type BroadcastHandler = (); } /// Converts a local signed origin into an XCM location. 
Forms the basis for local origins diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/mod.rs index 27532ac431e7a..aec703f6641b1 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/mod.rs @@ -302,4 +302,10 @@ impl XcmWeightInfo for AssetHubWestendXcmWeight { fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { XcmGeneric::::execute_with_origin() } + fn publish(data: &PublishData) -> Weight { + // TODO: Generate proper weights via benchmarking + // For now, use a conservative estimate: base weight + per-item weight + Weight::from_parts(10_000_000, 0) + .saturating_add(Weight::from_parts(1_000_000, 0).saturating_mul(data.len() as u64)) + } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs index efeca0fede196..159f7517f60cd 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs @@ -472,6 +472,7 @@ impl xcm_executor::Config for XcmConfig { type HrmpChannelAcceptedHandler = (); type HrmpChannelClosingHandler = (); type XcmRecorder = PolkadotXcm; + type BroadcastHandler = (); } parameter_types! 
{ diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/mod.rs index 21708ec743821..708ca3814932c 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/mod.rs @@ -272,4 +272,10 @@ impl XcmWeightInfo for BridgeHubRococoXcmWeight { fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { XcmGeneric::::execute_with_origin() } + fn publish(data: &PublishData) -> Weight { + // TODO: Generate proper weights via benchmarking + // For now, use a conservative estimate: base weight + per-item weight + Weight::from_parts(10_000_000, 0) + .saturating_add(Weight::from_parts(1_000_000, 0).saturating_mul(data.len() as u64)) + } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs index 8a661ed53236e..f316e7437736c 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs @@ -240,6 +240,7 @@ impl xcm_executor::Config for XcmConfig { type HrmpChannelAcceptedHandler = (); type HrmpChannelClosingHandler = (); type XcmRecorder = PolkadotXcm; + type BroadcastHandler = (); } pub type PriceForParentDelivery = diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/mod.rs index 3706bfe22a3c8..69aa5aea19eb7 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/mod.rs @@ -272,4 +272,10 @@ impl XcmWeightInfo for BridgeHubWestendXcmWeight { fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { 
XcmGeneric::::execute_with_origin() } + fn publish(data: &PublishData) -> Weight { + // TODO: Generate proper weights via benchmarking + // For now, use a conservative estimate: base weight + per-item weight + Weight::from_parts(10_000_000, 0) + .saturating_add(Weight::from_parts(1_000_000, 0).saturating_mul(data.len() as u64)) + } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs index d1b1e78ef8343..0d08e25f911a4 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs @@ -251,6 +251,7 @@ impl xcm_executor::Config for XcmConfig { type HrmpChannelAcceptedHandler = (); type HrmpChannelClosingHandler = (); type XcmRecorder = PolkadotXcm; + type BroadcastHandler = (); } pub type PriceForParentDelivery = diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/mod.rs index 7c44ce449383f..00a7f5631f51d 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/mod.rs @@ -270,4 +270,10 @@ impl XcmWeightInfo for CollectivesWestendXcmWeight { fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { XcmGeneric::::execute_with_origin() } + fn publish(data: &PublishData) -> Weight { + // TODO: Generate proper weights via benchmarking + // For now, use a conservative estimate: base weight + per-item weight + Weight::from_parts(10_000_000, 0) + .saturating_add(Weight::from_parts(1_000_000, 0).saturating_mul(data.len() as u64)) + } } diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs index 
b3a7f2bd9af05..91c67bc8e641c 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs @@ -258,6 +258,7 @@ impl xcm_executor::Config for XcmConfig { type HrmpChannelAcceptedHandler = (); type HrmpChannelClosingHandler = (); type XcmRecorder = PolkadotXcm; + type BroadcastHandler = (); } /// Converts a local signed origin into an XCM location. Forms the basis for local origins diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/mod.rs index 75e0908cb395d..da47e67b56a8e 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/mod.rs @@ -269,4 +269,10 @@ impl XcmWeightInfo for CoretimeWestendXcmWeight { fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { XcmGeneric::::execute_with_origin() } + fn publish(data: &PublishData) -> Weight { + // TODO: Generate proper weights via benchmarking + // For now, use a conservative estimate: base weight + per-item weight + Weight::from_parts(10_000_000, 0) + .saturating_add(Weight::from_parts(1_000_000, 0).saturating_mul(data.len() as u64)) + } } diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs index 391972f24572c..1fe19b7c953b4 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs @@ -272,6 +272,7 @@ impl xcm_executor::Config for XcmConfig { type HrmpChannelAcceptedHandler = (); type HrmpChannelClosingHandler = (); type XcmRecorder = PolkadotXcm; + type BroadcastHandler = (); } /// Converts a local signed origin into an XCM location. 
Forms the basis for local origins diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/xcm_config.rs index f32cb211444c2..53dc0c85c2422 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/xcm_config.rs @@ -98,6 +98,7 @@ impl xcm_executor::Config for XcmConfig { type HrmpChannelAcceptedHandler = (); type HrmpChannelClosingHandler = (); type XcmRecorder = (); + type BroadcastHandler = (); } impl cumulus_pallet_xcm::Config for Runtime { diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/mod.rs index a7d394b603b2f..431ff33f918f3 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/mod.rs @@ -268,4 +268,10 @@ impl XcmWeightInfo for PeopleWestendXcmWeight { fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { XcmGeneric::::execute_with_origin() } + fn publish(data: &PublishData) -> Weight { + // TODO: Generate proper weights via benchmarking + // For now, use a conservative estimate: base weight + per-item weight + Weight::from_parts(10_000_000, 0) + .saturating_add(Weight::from_parts(1_000_000, 0).saturating_mul(data.len() as u64)) + } } diff --git a/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs index e5203f39c8814..c41a07142be97 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs @@ -278,6 +278,7 @@ impl xcm_executor::Config for XcmConfig { type HrmpChannelAcceptedHandler = (); type HrmpChannelClosingHandler = (); type XcmRecorder = PolkadotXcm; + type BroadcastHandler = (); } /// Converts a 
local signed origin into an XCM location. Forms the basis for local origins diff --git a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs index f8a9cdbdf56c8..e92e8c8b49166 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs @@ -441,6 +441,7 @@ impl xcm_executor::Config for XcmConfig { type HrmpChannelAcceptedHandler = (); type HrmpChannelClosingHandler = (); type XcmRecorder = PolkadotXcm; + type BroadcastHandler = (); } /// Multiplier used for dedicated `TakeFirstAssetTrader` with `ForeignAssets` instance. diff --git a/cumulus/parachains/runtimes/testing/yet-another-parachain/src/xcm_config.rs b/cumulus/parachains/runtimes/testing/yet-another-parachain/src/xcm_config.rs index c1b83f5dbd74e..4d783f6fe6739 100644 --- a/cumulus/parachains/runtimes/testing/yet-another-parachain/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/testing/yet-another-parachain/src/xcm_config.rs @@ -165,6 +165,7 @@ impl xcm_executor::Config for XcmConfig { type HrmpChannelAcceptedHandler = (); type HrmpChannelClosingHandler = (); type XcmRecorder = PolkadotXcm; + type BroadcastHandler = (); } /// No local origins on this chain are allowed to dispatch XCM sends/executions. 
diff --git a/polkadot/runtime/rococo/src/weights/xcm/mod.rs b/polkadot/runtime/rococo/src/weights/xcm/mod.rs index 36d818a87445d..d99310f1b8240 100644 --- a/polkadot/runtime/rococo/src/weights/xcm/mod.rs +++ b/polkadot/runtime/rococo/src/weights/xcm/mod.rs @@ -305,6 +305,12 @@ impl XcmWeightInfo for RococoXcmWeight { fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { XcmGeneric::::execute_with_origin() } + fn publish(data: &PublishData) -> Weight { + // TODO: Generate proper weights via benchmarking + // For now, use a conservative estimate: base weight + per-item weight + Weight::from_parts(10_000_000, 0) + .saturating_add(Weight::from_parts(1_000_000, 0).saturating_mul(data.len() as u64)) + } } #[test] diff --git a/polkadot/runtime/rococo/src/xcm_config.rs b/polkadot/runtime/rococo/src/xcm_config.rs index 87fc99eb32ad7..5b6654438fa62 100644 --- a/polkadot/runtime/rococo/src/xcm_config.rs +++ b/polkadot/runtime/rococo/src/xcm_config.rs @@ -227,6 +227,7 @@ impl xcm_executor::Config for XcmConfig { type HrmpChannelAcceptedHandler = (); type HrmpChannelClosingHandler = (); type XcmRecorder = XcmPallet; + type BroadcastHandler = (); } parameter_types! 
{ diff --git a/polkadot/runtime/westend/src/weights/xcm/mod.rs b/polkadot/runtime/westend/src/weights/xcm/mod.rs index ba4502e228420..46fdbab20df3d 100644 --- a/polkadot/runtime/westend/src/weights/xcm/mod.rs +++ b/polkadot/runtime/westend/src/weights/xcm/mod.rs @@ -307,6 +307,12 @@ impl XcmWeightInfo for WestendXcmWeight { fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { XcmGeneric::::execute_with_origin() } + fn publish(data: &PublishData) -> Weight { + // TODO: Generate proper weights via benchmarking + // For now, use a conservative estimate: base weight + per-item weight + Weight::from_parts(10_000_000, 0) + .saturating_add(Weight::from_parts(1_000_000, 0).saturating_mul(data.len() as u64)) + } } #[test] diff --git a/polkadot/runtime/westend/src/xcm_config.rs b/polkadot/runtime/westend/src/xcm_config.rs index a758d030de7de..67e838c843cb0 100644 --- a/polkadot/runtime/westend/src/xcm_config.rs +++ b/polkadot/runtime/westend/src/xcm_config.rs @@ -236,6 +236,7 @@ impl xcm_executor::Config for XcmConfig { type HrmpChannelAcceptedHandler = (); type HrmpChannelClosingHandler = (); type XcmRecorder = XcmPallet; + type BroadcastHandler = (); } parameter_types! 
{ From c0dd737a1e955d4cc3d8da9f0b2d3c62c7477604 Mon Sep 17 00:00:00 2001 From: metricaez Date: Wed, 10 Dec 2025 11:37:14 -0300 Subject: [PATCH 14/69] feat: pallet broadcaster impl --- .../src/broadcaster/benchmarking.rs | 158 ++++ .../runtime/parachains/src/broadcaster/mod.rs | 688 ++++++++++++++++ .../parachains/src/broadcaster/tests.rs | 738 ++++++++++++++++++ .../parachains/src/broadcaster/traits.rs | 29 + .../parachains/src/broadcaster/weights.rs | 67 ++ .../runtime/parachains/src/initializer.rs | 21 + polkadot/runtime/parachains/src/lib.rs | 1 + polkadot/runtime/parachains/src/mock.rs | 29 +- polkadot/runtime/rococo/src/lib.rs | 1 + polkadot/runtime/westend/src/lib.rs | 1 + 10 files changed, 1732 insertions(+), 1 deletion(-) create mode 100644 polkadot/runtime/parachains/src/broadcaster/benchmarking.rs create mode 100644 polkadot/runtime/parachains/src/broadcaster/mod.rs create mode 100644 polkadot/runtime/parachains/src/broadcaster/tests.rs create mode 100644 polkadot/runtime/parachains/src/broadcaster/traits.rs create mode 100644 polkadot/runtime/parachains/src/broadcaster/weights.rs diff --git a/polkadot/runtime/parachains/src/broadcaster/benchmarking.rs b/polkadot/runtime/parachains/src/broadcaster/benchmarking.rs new file mode 100644 index 0000000000000..1cb99bcb68a3d --- /dev/null +++ b/polkadot/runtime/parachains/src/broadcaster/benchmarking.rs @@ -0,0 +1,158 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +#![cfg(feature = "runtime-benchmarks")] + +use super::{Pallet as Broadcaster, *}; +use frame_benchmarking::v2::*; +use frame_support::traits::fungible::{Inspect as FunInspect, Mutate}; +use polkadot_primitives::Id as ParaId; + +type BalanceOf = + <::Currency as FunInspect<::AccountId>>::Balance; + +#[benchmarks] +mod benchmarks { + use super::*; + use frame_system::RawOrigin; + + #[benchmark] + fn register_publisher() { + let caller: T::AccountId = whitelisted_caller(); + let para_id = ParaId::from(2000); + let deposit = T::PublisherDeposit::get(); + + T::Currency::set_balance(&caller, deposit * 2u32.into()); + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), para_id); + + assert!(RegisteredPublishers::::contains_key(para_id)); + } + + #[benchmark] + fn force_register_publisher() { + let manager: T::AccountId = whitelisted_caller(); + let para_id = ParaId::from(1000); + let deposit = BalanceOf::::from(0u32); + + #[extrinsic_call] + _(RawOrigin::Root, manager.clone(), deposit, para_id); + + assert!(RegisteredPublishers::::contains_key(para_id)); + } + + #[benchmark] + fn cleanup_published_data(k: Linear<1, { T::MaxStoredKeys::get() }>) { + let caller: T::AccountId = whitelisted_caller(); + let para_id = ParaId::from(2000); + let deposit = T::PublisherDeposit::get(); + + T::Currency::set_balance(&caller, deposit * 2u32.into()); + Broadcaster::::register_publisher(RawOrigin::Signed(caller.clone()).into(), para_id) + .unwrap(); + + // Publish k keys + let mut data = Vec::new(); + for i in 0..k { + let mut key = b"key_".to_vec(); + key.extend_from_slice(&i.to_be_bytes()); + data.push((key, b"value".to_vec())); + } + Broadcaster::::handle_publish(para_id, data).unwrap(); + + #[extrinsic_call] + _(RawOrigin::Signed(caller), para_id); + + assert!(!PublisherExists::::get(para_id)); + } + + #[benchmark] + fn deregister_publisher() { + let caller: 
T::AccountId = whitelisted_caller(); + let para_id = ParaId::from(2000); + let deposit = T::PublisherDeposit::get(); + + T::Currency::set_balance(&caller, deposit * 2u32.into()); + Broadcaster::::register_publisher(RawOrigin::Signed(caller.clone()).into(), para_id) + .unwrap(); + + #[extrinsic_call] + _(RawOrigin::Signed(caller), para_id); + + assert!(!RegisteredPublishers::::contains_key(para_id)); + } + + #[benchmark] + fn force_deregister_publisher(k: Linear<0, { T::MaxStoredKeys::get() }>) { + let manager: T::AccountId = whitelisted_caller(); + let para_id = ParaId::from(2000); + let deposit = T::PublisherDeposit::get(); + + T::Currency::set_balance(&manager, deposit * 2u32.into()); + Broadcaster::::register_publisher(RawOrigin::Signed(manager).into(), para_id) + .unwrap(); + + // Publish k keys (if k > 0) + if k > 0 { + let mut data = Vec::new(); + for i in 0..k { + let mut key = b"key_".to_vec(); + key.extend_from_slice(&i.to_be_bytes()); + data.push((key, b"value".to_vec())); + } + Broadcaster::::handle_publish(para_id, data).unwrap(); + } + + #[extrinsic_call] + _(RawOrigin::Root, para_id); + + assert!(!RegisteredPublishers::::contains_key(para_id)); + } + + #[benchmark] + fn do_cleanup_publisher(k: Linear<1, { T::MaxStoredKeys::get() }>) { + let caller: T::AccountId = whitelisted_caller(); + let para_id = ParaId::from(2000); + let deposit = T::PublisherDeposit::get(); + + T::Currency::set_balance(&caller, deposit * 2u32.into()); + Broadcaster::::register_publisher(RawOrigin::Signed(caller).into(), para_id) + .unwrap(); + + // Publish k keys + let mut data = Vec::new(); + for i in 0..k { + let mut key = b"key_".to_vec(); + key.extend_from_slice(&i.to_be_bytes()); + data.push((key, b"value".to_vec())); + } + Broadcaster::::handle_publish(para_id, data).unwrap(); + + #[block] + { + Broadcaster::::do_cleanup_publisher(para_id).unwrap(); + } + + assert!(!PublisherExists::::get(para_id)); + } + + impl_benchmark_test_suite!( + Broadcaster, + 
crate::mock::new_test_ext(Default::default()), + crate::mock::Test + ); +} diff --git a/polkadot/runtime/parachains/src/broadcaster/mod.rs b/polkadot/runtime/parachains/src/broadcaster/mod.rs new file mode 100644 index 0000000000000..50d7657ac5577 --- /dev/null +++ b/polkadot/runtime/parachains/src/broadcaster/mod.rs @@ -0,0 +1,688 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Broadcaster pallet for managing parachain data publishing and subscription. +//! +//! This pallet provides a publish-subscribe mechanism for parachains to efficiently share data +//! through the relay chain storage using child tries per publisher. +//! +//! ## Publisher Registration +//! +//! Parachains must register before they can publish data: +//! +//! - System parachains (ID < 2000): Registered via `force_register_publisher` (Root origin) +//! with custom deposit amounts (typically zero). +//! - Public parachains (ID >= 2000): Registered via `register_publisher` requiring a deposit. +//! +//! The deposit is held using the native fungible traits with the `PublisherDeposit` hold reason. +//! +//! ## Storage Organization +//! +//! Each publisher gets a dedicated child trie identified by `(b"pubsub", ParaId)`. The child +//! trie root is stored on-chain and can be included in storage proofs for subscribers to verify +//! published data. +//! 
+//! ## Storage Lifecycle +//! +//! Publishers can deregister to reclaim their deposit and remove their data: +//! +//! 1. Call `cleanup_published_data` to remove all published key-value pairs from the child trie +//! 2. Call `deregister_publisher` to release the deposit and complete deregistration +//! +//! Root can force deregistration with `force_deregister_publisher`, which removes all data +//! and releases the deposit in a single call. + +use alloc::vec::Vec; +use codec::{Decode, Encode}; +use frame_support::{ + pallet_prelude::*, + storage::child::ChildInfo, + traits::{ + defensive_prelude::*, + fungible::{ + hold::{Balanced as FunHoldBalanced, Mutate as FunHoldMutate}, + Inspect as FunInspect, Mutate as FunMutate, + }, + tokens::Precision::Exact, + Get, + }, +}; +use frame_system::{ensure_root, ensure_signed, pallet_prelude::BlockNumberFor}; +use polkadot_primitives::Id as ParaId; +use scale_info::TypeInfo; +use sp_runtime::{traits::Zero, RuntimeDebug}; + +pub use pallet::*; + +mod traits; +pub use traits::Publish; + +pub mod weights; +pub use weights::WeightInfo; + +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; + +#[cfg(test)] +mod tests; + +/// Information about a registered publisher. +#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] +pub struct PublisherInfo { + /// The account that registered and manages this publisher. + pub manager: AccountId, + /// The amount held as deposit for registration. + pub deposit: Balance, +} + + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_system::pallet_prelude::*; + + const STORAGE_VERSION: StorageVersion = StorageVersion::new(0); + + #[pallet::pallet] + #[pallet::storage_version(STORAGE_VERSION)] + pub struct Pallet(_); + + /// Reasons for the pallet placing a hold on funds. + #[pallet::composite_enum] + pub enum HoldReason { + /// The funds are held as deposit for publisher registration. 
+ #[codec(index = 0)] + PublisherDeposit, + } + + type BalanceOf = + <::Currency as FunInspect<::AccountId>>::Balance; + + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type. + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + + /// Currency mechanism for managing publisher deposits. + type Currency: FunHoldMutate + + FunMutate + + FunHoldBalanced; + + /// Overarching hold reason. + type RuntimeHoldReason: From; + + /// Weight information for extrinsics and operations. + type WeightInfo: WeightInfo; + + /// Maximum number of items that can be published in a single operation. + /// + /// Must not exceed `xcm::v5::MaxPublishItems`. + #[pallet::constant] + type MaxPublishItems: Get; + + /// Maximum length of a published key in bytes. + /// + /// Must not exceed `xcm::v5::MaxPublishKeyLength`. + #[pallet::constant] + type MaxKeyLength: Get; + + /// Maximum length of a published value in bytes. + /// + /// Must not exceed `xcm::v5::MaxPublishValueLength`. + #[pallet::constant] + type MaxValueLength: Get; + + /// Maximum number of unique keys a publisher can store. + #[pallet::constant] + type MaxStoredKeys: Get; + + /// Maximum number of parachains that can register as publishers. + #[pallet::constant] + type MaxPublishers: Get; + + /// The deposit required for a parachain to register as a publisher. + /// + /// System parachains may use `force_register_publisher` with a custom deposit amount. + #[pallet::constant] + type PublisherDeposit: Get>; + } + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// Data published by a parachain. + DataPublished { publisher: ParaId, items_count: u32 }, + /// A publisher has been registered. + PublisherRegistered { para_id: ParaId, manager: T::AccountId }, + /// A publisher has been deregistered. + PublisherDeregistered { para_id: ParaId }, + /// Published data has been cleaned up. 
+ DataCleanedUp { para_id: ParaId }, + } + + /// Registered publishers and their deposit information. + /// + /// Parachains must be registered before they can publish data. The registration includes + /// information about the managing account and the deposit held for the registration. + #[pallet::storage] + pub type RegisteredPublishers = StorageMap< + _, + Twox64Concat, + ParaId, + PublisherInfo>, + OptionQuery, + >; + + /// Tracks which parachains have published data. + /// + /// Maps parachain ID to a boolean indicating whether they have a child trie. + /// The actual child trie info is derived deterministically from the ParaId. + #[pallet::storage] + pub type PublisherExists = StorageMap< + _, + Twox64Concat, + ParaId, + bool, + ValueQuery, + >; + + /// Tracks all published keys per parachain. + #[pallet::storage] + pub type PublishedKeys = StorageMap< + _, + Twox64Concat, + ParaId, + BoundedBTreeSet, T::MaxStoredKeys>, + ValueQuery, + >; + + + #[pallet::error] + pub enum Error { + /// Too many items in a single publish operation. + TooManyPublishItems, + /// Key length exceeds maximum allowed. + KeyTooLong, + /// Value length exceeds maximum allowed. + ValueTooLong, + /// Too many unique keys stored for this publisher. + TooManyStoredKeys, + /// Maximum number of publishers reached. + TooManyPublishers, + /// Para is not registered as a publisher. + NotRegistered, + /// Para is already registered as a publisher. + AlreadyRegistered, + /// Cannot publish without being registered first. + PublishNotAuthorized, + /// Caller is not authorized to perform this action. + NotAuthorized, + /// Cannot deregister while published data exists. Call cleanup_published_data first. + MustCleanupDataFirst, + /// No published data to cleanup. 
+ NoDataToCleanup, + } + + #[pallet::hooks] + impl Hooks> for Pallet { + fn integrity_test() { + assert!( + T::MaxPublishItems::get() <= xcm::v5::MaxPublishItems::get(), + "Broadcaster MaxPublishItems exceeds XCM MaxPublishItems upper bound" + ); + assert!( + T::MaxKeyLength::get() <= xcm::v5::MaxPublishKeyLength::get(), + "Broadcaster MaxKeyLength exceeds XCM MaxPublishKeyLength upper bound" + ); + assert!( + T::MaxValueLength::get() <= xcm::v5::MaxPublishValueLength::get(), + "Broadcaster MaxValueLength exceeds XCM MaxPublishValueLength upper bound" + ); + } + } + + #[pallet::call] + impl Pallet { + /// Register a parachain as a publisher with the calling account as manager. + /// + /// Requires `PublisherDeposit` to be held from the caller's account. + /// + /// Parameters: + /// - `origin`: Signed origin that will become the publisher manager and pay the deposit. + /// - `para_id`: The parachain to register as a publisher. + /// + /// Errors: + /// - `AlreadyRegistered` + /// - `InsufficientBalance` (from Currency trait) + /// + /// Events: + /// - `PublisherRegistered` + #[pallet::call_index(0)] + #[pallet::weight(T::WeightInfo::register_publisher())] + pub fn register_publisher( + origin: OriginFor, + para_id: ParaId, + ) -> DispatchResult { + let who = ensure_signed(origin)?; + Self::do_register_publisher(who, para_id, T::PublisherDeposit::get()) + } + + /// Register a parachain as a publisher with a custom deposit amount. + /// + /// Allows Root to register system parachains with zero or reduced deposits. + /// + /// Parameters: + /// - `origin`: Root origin. + /// - `manager`: Account that will manage the publisher. + /// - `deposit`: Custom deposit amount to hold (typically zero for system parachains). + /// - `para_id`: The parachain to register as a publisher. 
+ /// + /// Errors: + /// - `AlreadyRegistered` + /// - `InsufficientBalance` (from Currency trait if deposit is non-zero) + /// + /// Events: + /// - `PublisherRegistered` + #[pallet::call_index(1)] + #[pallet::weight(T::WeightInfo::force_register_publisher())] + pub fn force_register_publisher( + origin: OriginFor, + manager: T::AccountId, + deposit: BalanceOf, + para_id: ParaId, + ) -> DispatchResult { + ensure_root(origin)?; + Self::do_register_publisher(manager, para_id, deposit) + } + + /// Remove all published data for a parachain. + /// + /// Must be called before `deregister_publisher`. Only callable by the publisher manager. + /// + /// Parameters: + /// - `origin`: Signed origin, must be the publisher manager. + /// - `para_id`: The parachain to clean up. + /// + /// Errors: + /// - `NotRegistered` + /// - `NotAuthorized` + /// - `NoDataToCleanup` + /// + /// Events: + /// - `DataCleanedUp` + #[pallet::call_index(2)] + #[pallet::weight(T::WeightInfo::cleanup_published_data(T::MaxStoredKeys::get()))] + pub fn cleanup_published_data( + origin: OriginFor, + para_id: ParaId, + ) -> DispatchResult { + let who = ensure_signed(origin)?; + + let info = RegisteredPublishers::::get(para_id) + .ok_or(Error::::NotRegistered)?; + + ensure!(who == info.manager, Error::::NotAuthorized); + ensure!(PublisherExists::::get(para_id), Error::::NoDataToCleanup); + + Self::do_cleanup_publisher(para_id)?; + + Self::deposit_event(Event::DataCleanedUp { para_id }); + Ok(()) + } + + /// Deregister a publisher and release their deposit. + /// + /// All published data must be cleaned up first via `cleanup_published_data`. + /// + /// Parameters: + /// - `origin`: Signed origin, must be the publisher manager. + /// - `para_id`: The parachain to deregister. 
+ /// + /// Errors: + /// - `NotRegistered` + /// - `NotAuthorized` + /// - `MustCleanupDataFirst` + /// + /// Events: + /// - `PublisherDeregistered` + #[pallet::call_index(3)] + #[pallet::weight(T::WeightInfo::deregister_publisher())] + pub fn deregister_publisher( + origin: OriginFor, + para_id: ParaId, + ) -> DispatchResult { + let who = ensure_signed(origin)?; + + let info = RegisteredPublishers::::get(para_id) + .ok_or(Error::::NotRegistered)?; + + ensure!(who == info.manager, Error::::NotAuthorized); + ensure!(!PublisherExists::::get(para_id), Error::::MustCleanupDataFirst); + + Self::do_deregister(para_id, info)?; + + Self::deposit_event(Event::PublisherDeregistered { para_id }); + Ok(()) + } + + /// Force deregister a publisher, cleaning up data if necessary. + /// + /// Combines cleanup and deregistration in a single call. Only callable by Root. + /// + /// Parameters: + /// - `origin`: Root origin. + /// - `para_id`: The parachain to force deregister. + /// + /// Errors: + /// - `NotRegistered` + /// + /// Events: + /// - `DataCleanedUp` (if data existed) + /// - `PublisherDeregistered` + #[pallet::call_index(4)] + #[pallet::weight(T::WeightInfo::force_deregister_publisher(T::MaxStoredKeys::get()))] + pub fn force_deregister_publisher( + origin: OriginFor, + para_id: ParaId, + ) -> DispatchResult { + ensure_root(origin)?; + + let info = RegisteredPublishers::::get(para_id) + .ok_or(Error::::NotRegistered)?; + + // Clean up data if it exists + if PublisherExists::::get(para_id) { + Self::do_cleanup_publisher(para_id)?; + Self::deposit_event(Event::DataCleanedUp { para_id }); + } + + Self::do_deregister(para_id, info)?; + + Self::deposit_event(Event::PublisherDeregistered { para_id }); + Ok(()) + } + } + + impl Pallet { + /// Register a publisher, holding the deposit from the manager account. 
+ fn do_register_publisher( + manager: T::AccountId, + para_id: ParaId, + deposit: BalanceOf, + ) -> DispatchResult { + // Check not already registered + ensure!( + !RegisteredPublishers::::contains_key(para_id), + Error::::AlreadyRegistered + ); + + // Hold the deposit if non-zero + if !deposit.is_zero() { + ::Currency::hold( + &HoldReason::PublisherDeposit.into(), + &manager, + deposit, + )?; + } + + let info = PublisherInfo { manager: manager.clone(), deposit }; + + RegisteredPublishers::::insert(para_id, info); + Self::deposit_event(Event::PublisherRegistered { para_id, manager }); + + Ok(()) + } + + pub(crate) fn do_cleanup_publisher(para_id: ParaId) -> DispatchResult { + let child_info = Self::derive_child_info(para_id); + let published_keys = PublishedKeys::::get(para_id); + + // Remove all key-value pairs from the child trie + for bounded_key in published_keys.iter() { + let key: Vec = bounded_key.clone().into(); + frame_support::storage::child::kill(&child_info, &key); + } + + // Clean up tracking storage + PublishedKeys::::remove(para_id); + PublisherExists::::remove(para_id); + + Ok(()) + } + + fn do_deregister( + para_id: ParaId, + info: PublisherInfo>, + ) -> DispatchResult { + // Release deposit if non-zero + if !info.deposit.is_zero() { + let released = ::Currency::release( + &HoldReason::PublisherDeposit.into(), + &info.manager, + info.deposit, + Exact, + )?; + + defensive_assert!( + released == info.deposit, + "deposit should be fully released" + ); + } + + // Remove registration + RegisteredPublishers::::remove(para_id); + + Ok(()) + } + + /// Called by the initializer to note that a new session has started. + pub(crate) fn initializer_on_new_session( + _notification: &crate::initializer::SessionChangeNotification>, + outgoing_paras: &[ParaId], + ) -> Weight { + Self::cleanup_outgoing_publishers(outgoing_paras) + } + + /// Remove all storage for offboarded parachains. 
+ fn cleanup_outgoing_publishers(outgoing: &[ParaId]) -> Weight { + let mut total_weight = Weight::zero(); + for outgoing_para in outgoing { + total_weight = total_weight.saturating_add(Self::cleanup_outgoing_publisher(outgoing_para)); + } + total_weight + } + + /// Remove all relevant storage items for an outgoing parachain. + fn cleanup_outgoing_publisher(outgoing_para: &ParaId) -> Weight { + if let Some(info) = RegisteredPublishers::::get(outgoing_para) { + let weight = if PublisherExists::::get(outgoing_para) { + let published_keys = PublishedKeys::::get(outgoing_para); + let key_count = published_keys.len() as u32; + let _ = Self::do_cleanup_publisher(*outgoing_para); + T::WeightInfo::do_cleanup_publisher(key_count) + } else { + Weight::zero() + }; + + let _ = Self::do_deregister(*outgoing_para, info); + + // Account for reads (RegisteredPublishers, PublisherExists) and writes (deregister) + return weight + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)); + } + T::DbWeight::get().reads(1) // Just the RegisteredPublishers read + } + + /// Processes a publish operation from a parachain. + /// + /// Validates the publisher is registered, checks all bounds, and stores the provided + /// key-value pairs in the publisher's dedicated child trie. Updates the child trie root + /// and published keys tracking. 
+ pub fn handle_publish( + origin_para_id: ParaId, + data: Vec<(Vec, Vec)>, + ) -> DispatchResult { + // Check publisher is registered + ensure!( + RegisteredPublishers::::contains_key(origin_para_id), + Error::::PublishNotAuthorized + ); + + let items_count = data.len() as u32; + + // Validate input limits first before making any changes + ensure!( + data.len() <= T::MaxPublishItems::get() as usize, + Error::::TooManyPublishItems + ); + + // Validate all keys and values before creating publisher entry + for (key, value) in &data { + ensure!( + key.len() <= T::MaxKeyLength::get() as usize, + Error::::KeyTooLong + ); + ensure!( + value.len() <= T::MaxValueLength::get() as usize, + Error::::ValueTooLong + ); + } + + // Get or create child trie. This checks MaxPublishers limit on first publish. + let child_info = Self::get_or_create_publisher_child_info(origin_para_id)?; + + let mut published_keys = PublishedKeys::::get(origin_para_id); + + // Count new unique keys to prevent exceeding MaxStoredKeys + let mut new_keys_count = 0u32; + for (key, _) in &data { + if let Ok(bounded_key) = BoundedVec::try_from(key.clone()) { + if !published_keys.contains(&bounded_key) { + new_keys_count += 1; + } + } + } + + let current_keys_count = published_keys.len() as u32; + ensure!( + current_keys_count.saturating_add(new_keys_count) <= T::MaxStoredKeys::get(), + Error::::TooManyStoredKeys + ); + + // Write to child trie and track keys for enumeration + for (key, value) in data { + frame_support::storage::child::put(&child_info, &key, &value); + + if let Ok(bounded_key) = BoundedVec::try_from(key) { + published_keys.try_insert(bounded_key).defensive_ok(); + } + } + + PublishedKeys::::insert(origin_para_id, published_keys); + + Self::deposit_event(Event::DataPublished { publisher: origin_para_id, items_count }); + + Ok(()) + } + + /// Returns the child trie root hash for a specific publisher. 
+ /// + /// The root can be included in storage proofs for subscribers to verify published data. + pub fn get_publisher_child_root(para_id: ParaId) -> Option> { + PublisherExists::::get(para_id).then(|| { + let child_info = Self::derive_child_info(para_id); + frame_support::storage::child::root(&child_info, sp_runtime::StateVersion::V1) + }) + } + + /// Gets or creates the child trie info for a publisher. + /// + /// Checks the maximum publishers limit before creating a new publisher entry. + fn get_or_create_publisher_child_info(para_id: ParaId) -> Result { + if !PublisherExists::::contains_key(para_id) { + let current_publisher_count = PublisherExists::::iter().count() as u32; + ensure!( + current_publisher_count < T::MaxPublishers::get(), + Error::::TooManyPublishers + ); + PublisherExists::::insert(para_id, true); + } + Ok(Self::derive_child_info(para_id)) + } + + /// Derives a deterministic child trie identifier from a parachain ID. + /// + /// The child trie identifier is `(b"pubsub", para_id)` encoded. + pub fn derive_child_info(para_id: ParaId) -> ChildInfo { + ChildInfo::new_default(&(b"pubsub", para_id).encode()) + } + + /// Retrieves a value from a publisher's child trie. + /// + /// Returns `None` if the publisher doesn't exist or the key is not found. + pub fn get_published_value(para_id: ParaId, key: &[u8]) -> Option> { + PublisherExists::::get(para_id).then(|| { + let child_info = Self::derive_child_info(para_id); + frame_support::storage::child::get(&child_info, key) + })? + } + + /// Returns all published data for a parachain. + /// + /// Iterates over all tracked keys for the publisher and retrieves their values from the + /// child trie. 
+ pub fn get_all_published_data(para_id: ParaId) -> Vec<(Vec, Vec)> { + if !PublisherExists::::get(para_id) { + return Vec::new(); + } + + let child_info = Self::derive_child_info(para_id); + let published_keys = PublishedKeys::::get(para_id); + + published_keys + .into_iter() + .filter_map(|bounded_key| { + let key: Vec = bounded_key.into(); + frame_support::storage::child::get(&child_info, &key) + .map(|value| (key, value)) + }) + .collect() + } + + /// Returns a list of all parachains that have published data. + pub fn get_all_publishers() -> Vec { + PublisherExists::::iter_keys().collect() + } + } +} + +// Implement Publish trait +impl Publish for Pallet { + fn publish_data(publisher: ParaId, data: Vec<(Vec, Vec)>) -> DispatchResult { + Self::handle_publish(publisher, data) + } +} + +// Implement OnNewSessionOutgoing for cleanup of offboarded parachains +impl crate::initializer::OnNewSessionOutgoing> for Pallet { + fn on_new_session_outgoing( + notification: &crate::initializer::SessionChangeNotification>, + outgoing_paras: &[ParaId], + ) { + let _ = Self::initializer_on_new_session(notification, outgoing_paras); + } +} \ No newline at end of file diff --git a/polkadot/runtime/parachains/src/broadcaster/tests.rs b/polkadot/runtime/parachains/src/broadcaster/tests.rs new file mode 100644 index 0000000000000..297152cdab237 --- /dev/null +++ b/polkadot/runtime/parachains/src/broadcaster/tests.rs @@ -0,0 +1,738 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use super::*; +use crate::mock::{new_test_ext, Balances, Broadcaster, RuntimeOrigin, Test}; +use frame_support::{ + assert_err, assert_ok, + traits::fungible::{hold::Inspect as HoldInspect, Inspect}, +}; +use polkadot_primitives::Id as ParaId; + +const ALICE: u64 = 1; +const BOB: u64 = 2; + +fn setup_account(who: u64, balance: u128) { + let _ = Balances::mint_into(&who, balance); +} + +fn register_test_publisher(para_id: ParaId) { + setup_account(ALICE, 10000); + assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_id)); +} + +#[test] +fn register_publisher_works() { + new_test_ext(Default::default()).execute_with(|| { + let para_id = ParaId::from(2000); + setup_account(ALICE, 1000); + + assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_id)); + + let info = RegisteredPublishers::::get(para_id).unwrap(); + assert_eq!(info.manager, ALICE); + assert_eq!(info.deposit, 100); + + assert_eq!(Balances::balance_on_hold(&HoldReason::PublisherDeposit.into(), &ALICE), 100); + assert_eq!(Balances::balance(&ALICE), 900); + }); +} + +#[test] +fn force_register_system_chain_works() { + new_test_ext(Default::default()).execute_with(|| { + let para_id = ParaId::from(1000); // System chain + setup_account(ALICE, 1000); + + assert_ok!(Broadcaster::force_register_publisher( + RuntimeOrigin::root(), + ALICE, + 0, + para_id + )); + + let info = RegisteredPublishers::::get(para_id).unwrap(); + assert_eq!(info.manager, ALICE); + assert_eq!(info.deposit, 0); + + assert_eq!(Balances::balance_on_hold(&HoldReason::PublisherDeposit.into(), &ALICE), 0); + assert_eq!(Balances::balance(&ALICE), 1000); + }); +} + +#[test] +fn force_register_with_custom_deposit_works() { + new_test_ext(Default::default()).execute_with(|| { + let para_id = ParaId::from(2000); + setup_account(BOB, 
1000); + + assert_ok!(Broadcaster::force_register_publisher( + RuntimeOrigin::root(), + BOB, + 500, + para_id + )); + + let info = RegisteredPublishers::::get(para_id).unwrap(); + assert_eq!(info.manager, BOB); + assert_eq!(info.deposit, 500); + + assert_eq!(Balances::balance_on_hold(&HoldReason::PublisherDeposit.into(), &BOB), 500); + assert_eq!(Balances::balance(&BOB), 500); + }); +} + +#[test] +fn cannot_register_twice() { + new_test_ext(Default::default()).execute_with(|| { + let para_id = ParaId::from(2000); + setup_account(ALICE, 1000); + setup_account(BOB, 1000); + + assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_id)); + + assert_err!( + Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_id), + Error::::AlreadyRegistered + ); + + assert_err!( + Broadcaster::register_publisher(RuntimeOrigin::signed(BOB), para_id), + Error::::AlreadyRegistered + ); + + assert_eq!(Balances::balance_on_hold(&HoldReason::PublisherDeposit.into(), &ALICE), 100); + assert_eq!(Balances::balance_on_hold(&HoldReason::PublisherDeposit.into(), &BOB), 0); + }); +} + +#[test] +fn force_register_requires_root() { + new_test_ext(Default::default()).execute_with(|| { + let para_id = ParaId::from(1000); + setup_account(ALICE, 1000); + + assert_err!( + Broadcaster::force_register_publisher(RuntimeOrigin::signed(ALICE), ALICE, 0, para_id), + sp_runtime::DispatchError::BadOrigin + ); + + assert!(!RegisteredPublishers::::contains_key(para_id)); + }); +} + +#[test] +fn register_publisher_requires_sufficient_balance() { + new_test_ext(Default::default()).execute_with(|| { + let para_id = ParaId::from(2000); + setup_account(ALICE, 50); // Less than required deposit + + let result = Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_id); + assert!(result.is_err()); + + assert!(!RegisteredPublishers::::contains_key(para_id)); + }); +} + +#[test] +fn publish_requires_registration() { + new_test_ext(Default::default()).execute_with(|| { + let 
para_id = ParaId::from(2000); + let data = vec![(b"key".to_vec(), b"value".to_vec())]; + + assert_err!( + Broadcaster::handle_publish(para_id, data), + Error::::PublishNotAuthorized + ); + + assert!(!PublisherExists::::get(para_id)); + }); +} + +#[test] +fn registered_publisher_can_publish() { + new_test_ext(Default::default()).execute_with(|| { + let para_id = ParaId::from(2000); + setup_account(ALICE, 1000); + + assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_id)); + + let data = vec![(b"key".to_vec(), b"value".to_vec())]; + assert_ok!(Broadcaster::handle_publish(para_id, data)); + + assert_eq!(Broadcaster::get_published_value(para_id, b"key"), Some(b"value".to_vec())); + }); +} + +#[test] +fn publish_store_retrieve_and_update_data() { + new_test_ext(Default::default()).execute_with(|| { + let para_id = ParaId::from(2000); + setup_account(ALICE, 1000); + + assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_id)); + + assert!(!PublisherExists::::get(para_id)); + assert!(Broadcaster::get_publisher_child_root(para_id).is_none()); + + let initial_data = + vec![(b"key1".to_vec(), b"value1".to_vec()), (b"key2".to_vec(), b"value2".to_vec())]; + Broadcaster::handle_publish(para_id, initial_data.clone()).unwrap(); + + assert!(PublisherExists::::get(para_id)); + let root_after_initial = Broadcaster::get_publisher_child_root(para_id); + assert!(root_after_initial.is_some()); + assert!(!root_after_initial.as_ref().unwrap().is_empty()); + + assert_eq!(Broadcaster::get_published_value(para_id, b"key1"), Some(b"value1".to_vec())); + assert_eq!(Broadcaster::get_published_value(para_id, b"key2"), Some(b"value2".to_vec())); + assert_eq!(Broadcaster::get_published_value(para_id, b"key3"), None); + + let update_data = vec![ + (b"key1".to_vec(), b"updated_value1".to_vec()), + (b"key3".to_vec(), b"value3".to_vec()), + ]; + Broadcaster::handle_publish(para_id, update_data).unwrap(); + + let root_after_update = 
Broadcaster::get_publisher_child_root(para_id); + assert!(root_after_update.is_some()); + assert_ne!(root_after_initial.unwrap(), root_after_update.unwrap()); + + assert_eq!( + Broadcaster::get_published_value(para_id, b"key1"), + Some(b"updated_value1".to_vec()) + ); + assert_eq!( + Broadcaster::get_published_value(para_id, b"key2"), + Some(b"value2".to_vec()) // Should remain unchanged + ); + assert_eq!(Broadcaster::get_published_value(para_id, b"key3"), Some(b"value3".to_vec())); + }); +} + +#[test] +fn empty_publish_still_creates_publisher() { + new_test_ext(Default::default()).execute_with(|| { + let para_id = ParaId::from(2000); + register_test_publisher(para_id); + + let _ = Broadcaster::handle_publish(para_id, vec![]); + + assert!(PublisherExists::::get(para_id)); + }); +} + +#[test] +fn handle_publish_respects_max_items_limit() { + new_test_ext(Default::default()).execute_with(|| { + let para_id = ParaId::from(2000); + register_test_publisher(para_id); + + let mut data = Vec::new(); + for i in 0..17 { + data.push((format!("key{}", i).into_bytes(), b"value".to_vec())); + } + + let result = Broadcaster::handle_publish(para_id, data); + assert!(result.is_err()); + }); +} + +#[test] +fn handle_publish_respects_key_length_limit() { + new_test_ext(Default::default()).execute_with(|| { + let para_id = ParaId::from(2000); + register_test_publisher(para_id); + + let long_key = vec![b'a'; 257]; + let data = vec![(long_key, b"value".to_vec())]; + + let result = Broadcaster::handle_publish(para_id, data); + assert!(result.is_err()); + }); +} + +#[test] +fn handle_publish_respects_value_length_limit() { + new_test_ext(Default::default()).execute_with(|| { + let para_id = ParaId::from(2000); + register_test_publisher(para_id); + + let long_value = vec![b'v'; 1025]; + let data = vec![(b"key".to_vec(), long_value)]; + + let result = Broadcaster::handle_publish(para_id, data); + assert!(result.is_err()); + }); +} + +#[test] +fn max_stored_keys_limit_enforced() { + 
new_test_ext(Default::default()).execute_with(|| { + let para_id = ParaId::from(2000); + register_test_publisher(para_id); + + for batch in 0..7 { + let mut data = Vec::new(); + for i in 0..16 { + let key_num = batch * 16 + i; + if key_num < 100 { + data.push((format!("key{}", key_num).into_bytes(), b"value".to_vec())); + } + } + if !data.is_empty() { + assert_ok!(Broadcaster::handle_publish(para_id, data)); + } + } + + let published_keys = PublishedKeys::::get(para_id); + assert_eq!(published_keys.len(), 100); + + let result = + Broadcaster::handle_publish(para_id, vec![(b"new_key".to_vec(), b"value".to_vec())]); + assert_err!(result, Error::::TooManyStoredKeys); + + let result = Broadcaster::handle_publish( + para_id, + vec![(b"key0".to_vec(), b"updated_value".to_vec())], + ); + assert_ok!(result); + + assert_eq!( + Broadcaster::get_published_value(para_id, b"key0"), + Some(b"updated_value".to_vec()) + ); + }); +} + +#[test] +fn published_keys_storage_matches_child_trie() { + new_test_ext(Default::default()).execute_with(|| { + let para_id = ParaId::from(2000); + register_test_publisher(para_id); + + // Publish multiple batches to ensure consistency maintained across updates + let data1 = vec![ + (b"key1".to_vec(), b"value1".to_vec()), + (b"key2".to_vec(), b"value2".to_vec()), + ]; + Broadcaster::handle_publish(para_id, data1).unwrap(); + + // Update some keys, add new ones + let data2 = vec![ + (b"key1".to_vec(), b"updated_value1".to_vec()), + (b"key3".to_vec(), b"value3".to_vec()), + ]; + Broadcaster::handle_publish(para_id, data2).unwrap(); + + let tracked_keys = PublishedKeys::::get(para_id); + let actual_data = Broadcaster::get_all_published_data(para_id); + + // Counts must match + assert_eq!(tracked_keys.len(), actual_data.len()); + + // Every tracked key must exist in child trie + for tracked_key in tracked_keys.iter() { + let key: Vec = tracked_key.clone().into(); + assert!(actual_data.iter().any(|(k, _)| k == &key)); + } + + // Every child trie key must 
be tracked + for (actual_key, _) in actual_data.iter() { + assert!(tracked_keys.iter().any(|tracked| { + let k: Vec = tracked.clone().into(); + &k == actual_key + })); + } + }); +} + +#[test] +fn multiple_publishers_in_same_block() { + new_test_ext(Default::default()).execute_with(|| { + let para1 = ParaId::from(2000); + let para2 = ParaId::from(2001); + let para3 = ParaId::from(2002); + + // Register all publishers + register_test_publisher(para1); + setup_account(BOB, 10000); + assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(BOB), para2)); + setup_account(3, 10000); + assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(3), para3)); + + // Multiple parachains publish data in the same block + let data1 = vec![(b"key1".to_vec(), b"value1".to_vec())]; + let data2 = vec![(b"key2".to_vec(), b"value2".to_vec())]; + let data3 = vec![(b"key3".to_vec(), b"value3".to_vec())]; + + Broadcaster::handle_publish(para1, data1).unwrap(); + Broadcaster::handle_publish(para2, data2).unwrap(); + Broadcaster::handle_publish(para3, data3).unwrap(); + + // Verify all three publishers exist + assert!(PublisherExists::::get(para1)); + assert!(PublisherExists::::get(para2)); + assert!(PublisherExists::::get(para3)); + + // Verify each para's data is independently accessible + assert_eq!(Broadcaster::get_published_value(para1, b"key1"), Some(b"value1".to_vec())); + assert_eq!(Broadcaster::get_published_value(para2, b"key2"), Some(b"value2".to_vec())); + assert_eq!(Broadcaster::get_published_value(para3, b"key3"), Some(b"value3".to_vec())); + + // Verify no cross-contamination + assert_eq!(Broadcaster::get_published_value(para1, b"key2"), None); + assert_eq!(Broadcaster::get_published_value(para2, b"key3"), None); + assert_eq!(Broadcaster::get_published_value(para3, b"key1"), None); + }); +} + +#[test] +fn max_publishers_limit_enforced() { + new_test_ext(Default::default()).execute_with(|| { + // Register and publish for max publishers + for i in 0..1000 { + let 
para_id = ParaId::from(2000 + i); + setup_account(100 + i as u64, 10000); + assert_ok!(Broadcaster::register_publisher( + RuntimeOrigin::signed(100 + i as u64), + para_id + )); + let data = vec![(b"key".to_vec(), b"value".to_vec())]; + assert_ok!(Broadcaster::handle_publish(para_id, data)); + } + + assert_eq!(PublisherExists::::iter().count(), 1000); + + // Cannot register new publisher when limit reached + let new_para = ParaId::from(3000); + setup_account(ALICE, 10000); + let data = vec![(b"key".to_vec(), b"value".to_vec())]; + + // Registration should fail due to max publishers + // (registration checks this in get_or_create_publisher_child_info) + assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), new_para)); + assert_err!(Broadcaster::handle_publish(new_para, data), Error::::TooManyPublishers); + + // Existing publisher can still update + let existing_para = ParaId::from(2000); + let update_data = vec![(b"key".to_vec(), b"updated".to_vec())]; + assert_ok!(Broadcaster::handle_publish(existing_para, update_data)); + assert_eq!( + Broadcaster::get_published_value(existing_para, b"key"), + Some(b"updated".to_vec()) + ); + }); +} + +#[test] +fn cleanup_published_data_works() { + new_test_ext(Default::default()).execute_with(|| { + let para_id = ParaId::from(2000); + setup_account(ALICE, 10000); + + assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_id)); + let data = vec![ + (b"key1".to_vec(), b"value1".to_vec()), + (b"key2".to_vec(), b"value2".to_vec()), + ]; + assert_ok!(Broadcaster::handle_publish(para_id, data)); + + assert!(PublisherExists::::get(para_id)); + assert_eq!(PublishedKeys::::get(para_id).len(), 2); + + assert_ok!(Broadcaster::cleanup_published_data(RuntimeOrigin::signed(ALICE), para_id)); + + assert!(!PublisherExists::::get(para_id)); + assert_eq!(PublishedKeys::::get(para_id).len(), 0); + assert_eq!(Broadcaster::get_published_value(para_id, b"key1"), None); + 
assert_eq!(Broadcaster::get_published_value(para_id, b"key2"), None); + assert!(RegisteredPublishers::::get(para_id).is_some()); + }); +} + +#[test] +fn cleanup_requires_manager() { + new_test_ext(Default::default()).execute_with(|| { + let para_id = ParaId::from(2000); + setup_account(ALICE, 10000); + setup_account(BOB, 10000); + + assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_id)); + assert_ok!(Broadcaster::handle_publish(para_id, vec![(b"key".to_vec(), b"value".to_vec())])); + + assert_err!( + Broadcaster::cleanup_published_data(RuntimeOrigin::signed(BOB), para_id), + Error::::NotAuthorized + ); + + assert!(PublisherExists::::get(para_id)); + }); +} + +#[test] +fn cleanup_fails_if_no_data() { + new_test_ext(Default::default()).execute_with(|| { + let para_id = ParaId::from(2000); + setup_account(ALICE, 10000); + + assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_id)); + + assert_err!( + Broadcaster::cleanup_published_data(RuntimeOrigin::signed(ALICE), para_id), + Error::::NoDataToCleanup + ); + }); +} + +#[test] +fn cleanup_fails_if_not_registered() { + new_test_ext(Default::default()).execute_with(|| { + let para_id = ParaId::from(2000); + setup_account(ALICE, 10000); + + assert_err!( + Broadcaster::cleanup_published_data(RuntimeOrigin::signed(ALICE), para_id), + Error::::NotRegistered + ); + }); +} + +#[test] +fn deregister_publisher_works() { + new_test_ext(Default::default()).execute_with(|| { + let para_id = ParaId::from(2000); + setup_account(ALICE, 10000); + + assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_id)); + + assert_eq!(Balances::balance_on_hold(&HoldReason::PublisherDeposit.into(), &ALICE), 100); + assert_eq!(Balances::balance(&ALICE), 9900); + + assert_ok!(Broadcaster::deregister_publisher(RuntimeOrigin::signed(ALICE), para_id)); + + assert_eq!(Balances::balance_on_hold(&HoldReason::PublisherDeposit.into(), &ALICE), 0); + 
assert_eq!(Balances::balance(&ALICE), 10000); + assert!(!RegisteredPublishers::::contains_key(para_id)); + }); +} + +#[test] +fn deregister_fails_if_data_exists() { + new_test_ext(Default::default()).execute_with(|| { + let para_id = ParaId::from(2000); + setup_account(ALICE, 10000); + + assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_id)); + assert_ok!(Broadcaster::handle_publish(para_id, vec![(b"key".to_vec(), b"value".to_vec())])); + + assert_err!( + Broadcaster::deregister_publisher(RuntimeOrigin::signed(ALICE), para_id), + Error::::MustCleanupDataFirst + ); + + assert_eq!(Balances::balance_on_hold(&HoldReason::PublisherDeposit.into(), &ALICE), 100); + }); +} + +#[test] +fn deregister_requires_manager() { + new_test_ext(Default::default()).execute_with(|| { + let para_id = ParaId::from(2000); + setup_account(ALICE, 10000); + setup_account(BOB, 10000); + + assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_id)); + + assert_err!( + Broadcaster::deregister_publisher(RuntimeOrigin::signed(BOB), para_id), + Error::::NotAuthorized + ); + }); +} + +#[test] +fn two_phase_cleanup_and_deregister_works() { + new_test_ext(Default::default()).execute_with(|| { + let para_id = ParaId::from(2000); + setup_account(ALICE, 10000); + + assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_id)); + let data = vec![ + (b"key1".to_vec(), b"value1".to_vec()), + (b"key2".to_vec(), b"value2".to_vec()), + (b"key3".to_vec(), b"value3".to_vec()), + ]; + assert_ok!(Broadcaster::handle_publish(para_id, data)); + + // Phase 1: Cleanup data + assert_ok!(Broadcaster::cleanup_published_data(RuntimeOrigin::signed(ALICE), para_id)); + assert!(!PublisherExists::::get(para_id)); + assert_eq!(Balances::balance_on_hold(&HoldReason::PublisherDeposit.into(), &ALICE), 100); + + // Phase 2: Deregister + assert_ok!(Broadcaster::deregister_publisher(RuntimeOrigin::signed(ALICE), para_id)); + 
assert!(!RegisteredPublishers::::contains_key(para_id)); + assert_eq!(Balances::balance_on_hold(&HoldReason::PublisherDeposit.into(), &ALICE), 0); + assert_eq!(Balances::balance(&ALICE), 10000); + }); +} + +#[test] +fn force_deregister_works() { + new_test_ext(Default::default()).execute_with(|| { + let para_id = ParaId::from(2000); + setup_account(ALICE, 10000); + + assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_id)); + let data = vec![ + (b"key1".to_vec(), b"value1".to_vec()), + (b"key2".to_vec(), b"value2".to_vec()), + ]; + assert_ok!(Broadcaster::handle_publish(para_id, data)); + + assert_ok!(Broadcaster::force_deregister_publisher(RuntimeOrigin::root(), para_id)); + + assert!(!PublisherExists::::get(para_id)); + assert!(!RegisteredPublishers::::contains_key(para_id)); + assert_eq!(PublishedKeys::::get(para_id).len(), 0); + assert_eq!(Balances::balance_on_hold(&HoldReason::PublisherDeposit.into(), &ALICE), 0); + assert_eq!(Balances::balance(&ALICE), 10000); + }); +} + +#[test] +fn force_deregister_works_without_data() { + new_test_ext(Default::default()).execute_with(|| { + let para_id = ParaId::from(2000); + setup_account(ALICE, 10000); + + assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_id)); + + assert_ok!(Broadcaster::force_deregister_publisher(RuntimeOrigin::root(), para_id)); + + assert!(!RegisteredPublishers::::contains_key(para_id)); + assert_eq!(Balances::balance(&ALICE), 10000); + }); +} + +#[test] +fn force_deregister_requires_root() { + new_test_ext(Default::default()).execute_with(|| { + let para_id = ParaId::from(2000); + setup_account(ALICE, 10000); + + assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_id)); + assert_ok!(Broadcaster::handle_publish(para_id, vec![(b"key".to_vec(), b"value".to_vec())])); + + assert_err!( + Broadcaster::force_deregister_publisher(RuntimeOrigin::signed(ALICE), para_id), + sp_runtime::DispatchError::BadOrigin + ); + + 
assert!(PublisherExists::::get(para_id)); + assert!(RegisteredPublishers::::contains_key(para_id)); + }); +} + +#[test] +fn cleanup_removes_all_keys_from_child_trie() { + new_test_ext(Default::default()).execute_with(|| { + let para_id = ParaId::from(2000); + setup_account(ALICE, 10000); + + assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_id)); + + // Publish multiple batches to fill up keys + for batch in 0..5 { + let mut data = Vec::new(); + for i in 0..10 { + let key = format!("key_{}_{}", batch, i); + data.push((key.as_bytes().to_vec(), b"value".to_vec())); + } + assert_ok!(Broadcaster::handle_publish(para_id, data)); + } + + assert_eq!(PublishedKeys::::get(para_id).len(), 50); + + assert_ok!(Broadcaster::cleanup_published_data(RuntimeOrigin::signed(ALICE), para_id)); + + for batch in 0..5 { + for i in 0..10 { + let key = format!("key_{}_{}", batch, i); + assert_eq!(Broadcaster::get_published_value(para_id, key.as_bytes()), None); + } + } + + assert_eq!(PublishedKeys::::get(para_id).len(), 0); + }); +} + +#[test] +fn force_deregister_with_zero_deposit() { + new_test_ext(Default::default()).execute_with(|| { + let para_id = ParaId::from(1000); // System chain + setup_account(ALICE, 10000); + + assert_ok!(Broadcaster::force_register_publisher( + RuntimeOrigin::root(), + ALICE, + 0, + para_id + )); + + assert_ok!(Broadcaster::handle_publish(para_id, vec![(b"key".to_vec(), b"value".to_vec())])); + + assert_ok!(Broadcaster::force_deregister_publisher(RuntimeOrigin::root(), para_id)); + + assert!(!RegisteredPublishers::::contains_key(para_id)); + assert_eq!(Balances::balance(&ALICE), 10000); // No deposit change + }); +} + +#[test] +fn cleanup_outgoing_publishers_works() { + new_test_ext(Default::default()).execute_with(|| { + let para_a = ParaId::from(2000); + let para_b = ParaId::from(2001); + let para_c = ParaId::from(2002); + + setup_account(ALICE, 10000); + + // Register and publish data for A, B, C + 
assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_a)); + assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_b)); + assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_c)); + + assert_ok!(Broadcaster::handle_publish(para_a, vec![(b"key1".to_vec(), b"value1".to_vec())])); + assert_ok!(Broadcaster::handle_publish(para_b, vec![(b"key2".to_vec(), b"value2".to_vec())])); + assert_ok!(Broadcaster::handle_publish(para_c, vec![(b"key3".to_vec(), b"value3".to_vec())])); + + let notification = crate::initializer::SessionChangeNotification::default(); + let outgoing_paras = vec![para_a, para_b]; + Broadcaster::initializer_on_new_session(¬ification, &outgoing_paras); + + // A and B cleaned up + assert!(!RegisteredPublishers::::contains_key(para_a)); + assert!(!RegisteredPublishers::::contains_key(para_b)); + assert!(!PublisherExists::::get(para_a)); + assert!(!PublisherExists::::get(para_b)); + + // C unaffected + assert!(RegisteredPublishers::::contains_key(para_c)); + assert!(PublisherExists::::get(para_c)); + }); +} diff --git a/polkadot/runtime/parachains/src/broadcaster/traits.rs b/polkadot/runtime/parachains/src/broadcaster/traits.rs new file mode 100644 index 0000000000000..10e6ea3bd3172 --- /dev/null +++ b/polkadot/runtime/parachains/src/broadcaster/traits.rs @@ -0,0 +1,29 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Traits for publish/subscribe operations in the broadcaster pallet. + +use alloc::vec::Vec; +use polkadot_primitives::Id as ParaId; +use sp_runtime::DispatchResult; + +/// Trait for handling publish operations for parachains. +/// +/// This trait provides the interface for parachains to publish key-value data. +pub trait Publish { + /// Publish key-value data for a specific parachain. + fn publish_data(publisher: ParaId, data: Vec<(Vec, Vec)>) -> DispatchResult; +} diff --git a/polkadot/runtime/parachains/src/broadcaster/weights.rs b/polkadot/runtime/parachains/src/broadcaster/weights.rs new file mode 100644 index 0000000000000..702eb678f408f --- /dev/null +++ b/polkadot/runtime/parachains/src/broadcaster/weights.rs @@ -0,0 +1,67 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Autogenerated weights for `broadcaster` +//! +//! THIS FILE WAS NOT AUTO-GENERATED. PLACEHOLDER WEIGHTS. +//! TODO: Run benchmarks to generate actual weights. + +use frame_support::weights::Weight; + +/// Weight information for broadcaster operations. 
+pub trait WeightInfo { + fn register_publisher() -> Weight; + fn force_register_publisher() -> Weight; + fn cleanup_published_data(k: u32) -> Weight; + fn deregister_publisher() -> Weight; + fn force_deregister_publisher(k: u32) -> Weight; + fn do_cleanup_publisher(k: u32) -> Weight; +} + +/// Placeholder weights (to be replaced with benchmarked values). +impl WeightInfo for () { + fn register_publisher() -> Weight { + Weight::from_parts(20_000_000, 0) + .saturating_add(Weight::from_parts(0, 3000)) + } + + fn force_register_publisher() -> Weight { + Weight::from_parts(15_000_000, 0) + .saturating_add(Weight::from_parts(0, 3000)) + } + + fn cleanup_published_data(k: u32) -> Weight { + Weight::from_parts(25_000_000, 0) + .saturating_add(Weight::from_parts(5_000_000, 0).saturating_mul(k.into())) + .saturating_add(Weight::from_parts(0, 5000)) + } + + fn deregister_publisher() -> Weight { + Weight::from_parts(20_000_000, 0) + .saturating_add(Weight::from_parts(0, 3000)) + } + + fn force_deregister_publisher(k: u32) -> Weight { + Weight::from_parts(30_000_000, 0) + .saturating_add(Weight::from_parts(5_000_000, 0).saturating_mul(k.into())) + .saturating_add(Weight::from_parts(0, 5000)) + } + + fn do_cleanup_publisher(k: u32) -> Weight { + Weight::from_parts(10_000_000, 0) + .saturating_add(Weight::from_parts(5_000_000, 0).saturating_mul(k.into())) + } +} diff --git a/polkadot/runtime/parachains/src/initializer.rs b/polkadot/runtime/parachains/src/initializer.rs index 6ee245fb5230c..99d399f62f19c 100644 --- a/polkadot/runtime/parachains/src/initializer.rs +++ b/polkadot/runtime/parachains/src/initializer.rs @@ -70,6 +70,23 @@ impl OnNewSession for () { fn on_new_session(_: &SessionChangeNotification) {} } +/// Handler for session changes with offboarded parachains. +pub trait OnNewSessionOutgoing { + /// Called when a new session starts with parachains being offboarded. 
+ fn on_new_session_outgoing( + notification: &SessionChangeNotification, + outgoing_paras: &[polkadot_primitives::Id], + ); +} + +impl OnNewSessionOutgoing for () { + fn on_new_session_outgoing( + _: &SessionChangeNotification, + _: &[polkadot_primitives::Id], + ) { + } +} + /// Number of validators (not only parachain) in a session. pub type ValidatorSetCount = u32; @@ -134,6 +151,9 @@ pub mod pallet { /// to disable it on the ones that don't support it. Can be removed and replaced by a simple /// bound to `coretime::Config` once all chains support it. type CoretimeOnNewSession: OnNewSession>; + /// Optional handler for outgoing parachains on new session. + /// Use `()` to disable, or configure a pallet that implements `OnNewSessionOutgoing`. + type OnNewSessionOutgoing: OnNewSessionOutgoing>; /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; } @@ -282,6 +302,7 @@ impl Pallet { T::SlashingHandler::initializer_on_new_session(session_index); dmp::Pallet::::initializer_on_new_session(¬ification, &outgoing_paras); hrmp::Pallet::::initializer_on_new_session(¬ification, &outgoing_paras); + T::OnNewSessionOutgoing::on_new_session_outgoing(¬ification, &outgoing_paras); T::CoretimeOnNewSession::on_new_session(¬ification); } diff --git a/polkadot/runtime/parachains/src/lib.rs b/polkadot/runtime/parachains/src/lib.rs index 1cd534257d7f9..0c7be5b0f6834 100644 --- a/polkadot/runtime/parachains/src/lib.rs +++ b/polkadot/runtime/parachains/src/lib.rs @@ -24,6 +24,7 @@ #![cfg_attr(not(feature = "std"), no_std)] pub mod assigner_coretime; +pub mod broadcaster; pub mod configuration; pub mod coretime; pub mod disputes; diff --git a/polkadot/runtime/parachains/src/mock.rs b/polkadot/runtime/parachains/src/mock.rs index cba63ae7b1b04..3c1300dc3d3ce 100644 --- a/polkadot/runtime/parachains/src/mock.rs +++ b/polkadot/runtime/parachains/src/mock.rs @@ -17,7 +17,7 @@ //! Mocks for all the traits. 
use crate::{ - assigner_coretime, configuration, coretime, disputes, dmp, hrmp, + assigner_coretime, broadcaster, configuration, coretime, disputes, dmp, hrmp, inclusion::{self, AggregateMessageOrigin, UmpQueueId}, initializer, on_demand, origin, paras, paras::ParaKind, @@ -74,6 +74,7 @@ frame_support::construct_runtime!( Paras: paras, Configuration: configuration, ParasShared: shared, + Broadcaster: broadcaster, ParaInclusion: inclusion, ParaInherent: paras_inherent, Scheduler: scheduler, @@ -192,6 +193,7 @@ impl crate::initializer::Config for Test { type ForceOrigin = frame_system::EnsureRoot; type WeightInfo = (); type CoretimeOnNewSession = Coretime; + type OnNewSessionOutgoing = (); } impl crate::configuration::Config for Test { @@ -215,6 +217,31 @@ impl crate::shared::Config for Test { type DisabledValidators = MockDisabledValidators; } +parameter_types! { + pub const MaxPublishItems: u32 = 16; + pub const MaxKeyLength: u32 = 256; + pub const MaxValueLength: u32 = 1024; + pub const MaxStoredKeys: u32 = 100; + pub const MaxPublishers: u32 = 1000; +} + +parameter_types! { + pub const PublisherDeposit: Balance = 100; +} + +impl crate::broadcaster::Config for Test { + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type RuntimeHoldReason = RuntimeHoldReason; + type WeightInfo = (); + type MaxPublishItems = MaxPublishItems; + type MaxKeyLength = MaxKeyLength; + type MaxValueLength = MaxValueLength; + type MaxStoredKeys = MaxStoredKeys; + type MaxPublishers = MaxPublishers; + type PublisherDeposit = PublisherDeposit; +} + impl origin::Config for Test {} parameter_types! 
{ diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index abb6f8dd08379..48e30162fe1b0 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -1198,6 +1198,7 @@ impl parachains_initializer::Config for Runtime { type ForceOrigin = EnsureRoot; type WeightInfo = weights::polkadot_runtime_parachains_initializer::WeightInfo; type CoretimeOnNewSession = Coretime; + type OnNewSessionOutgoing = (); } impl parachains_disputes::Config for Runtime { diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index a87120469145b..544ed7aef499b 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -1575,6 +1575,7 @@ impl parachains_initializer::Config for Runtime { type ForceOrigin = EnsureRoot; type WeightInfo = weights::polkadot_runtime_parachains_initializer::WeightInfo; type CoretimeOnNewSession = Coretime; + type OnNewSessionOutgoing = (); } impl paras_sudo_wrapper::Config for Runtime {} From 93914d94885dbd81af26dc8e0508832b9dc46e30 Mon Sep 17 00:00:00 2001 From: metricaez Date: Wed, 10 Dec 2025 12:28:51 -0300 Subject: [PATCH 15/69] feat: pallet broadcaster clean up and benchmark fixes --- .../src/broadcaster/benchmarking.rs | 78 ++----------------- .../runtime/parachains/src/broadcaster/mod.rs | 45 ++++++----- .../parachains/src/broadcaster/tests.rs | 10 +-- .../parachains/src/broadcaster/traits.rs | 2 +- .../parachains/src/broadcaster/weights.rs | 20 ----- polkadot/runtime/parachains/src/mock.rs | 1 - 6 files changed, 37 insertions(+), 119 deletions(-) diff --git a/polkadot/runtime/parachains/src/broadcaster/benchmarking.rs b/polkadot/runtime/parachains/src/broadcaster/benchmarking.rs index 1cb99bcb68a3d..ec852d9db81ff 100644 --- a/polkadot/runtime/parachains/src/broadcaster/benchmarking.rs +++ b/polkadot/runtime/parachains/src/broadcaster/benchmarking.rs @@ -56,60 +56,21 @@ mod benchmarks { } #[benchmark] - fn 
cleanup_published_data(k: Linear<1, { T::MaxStoredKeys::get() }>) { - let caller: T::AccountId = whitelisted_caller(); - let para_id = ParaId::from(2000); - let deposit = T::PublisherDeposit::get(); - - T::Currency::set_balance(&caller, deposit * 2u32.into()); - Broadcaster::::register_publisher(RawOrigin::Signed(caller.clone()).into(), para_id) - .unwrap(); - - // Publish k keys - let mut data = Vec::new(); - for i in 0..k { - let mut key = b"key_".to_vec(); - key.extend_from_slice(&i.to_be_bytes()); - data.push((key, b"value".to_vec())); - } - Broadcaster::::handle_publish(para_id, data).unwrap(); - - #[extrinsic_call] - _(RawOrigin::Signed(caller), para_id); - - assert!(!PublisherExists::::get(para_id)); - } - - #[benchmark] - fn deregister_publisher() { + fn do_cleanup_publisher(k: Linear<1, { T::MaxStoredKeys::get() }>) { let caller: T::AccountId = whitelisted_caller(); let para_id = ParaId::from(2000); let deposit = T::PublisherDeposit::get(); T::Currency::set_balance(&caller, deposit * 2u32.into()); - Broadcaster::::register_publisher(RawOrigin::Signed(caller.clone()).into(), para_id) - .unwrap(); - - #[extrinsic_call] - _(RawOrigin::Signed(caller), para_id); - - assert!(!RegisteredPublishers::::contains_key(para_id)); - } - - #[benchmark] - fn force_deregister_publisher(k: Linear<0, { T::MaxStoredKeys::get() }>) { - let manager: T::AccountId = whitelisted_caller(); - let para_id = ParaId::from(2000); - let deposit = T::PublisherDeposit::get(); - - T::Currency::set_balance(&manager, deposit * 2u32.into()); - Broadcaster::::register_publisher(RawOrigin::Signed(manager).into(), para_id) + Broadcaster::::register_publisher(RawOrigin::Signed(caller).into(), para_id) .unwrap(); - // Publish k keys (if k > 0) - if k > 0 { + // Publish k keys in batches to respect MaxPublishItems limit + let max_items = T::MaxPublishItems::get(); + for batch_start in (0..k).step_by(max_items as usize) { + let batch_end = (batch_start + max_items).min(k); let mut data = Vec::new(); 
- for i in 0..k { + for i in batch_start..batch_end { let mut key = b"key_".to_vec(); key.extend_from_slice(&i.to_be_bytes()); data.push((key, b"value".to_vec())); @@ -117,31 +78,6 @@ mod benchmarks { Broadcaster::::handle_publish(para_id, data).unwrap(); } - #[extrinsic_call] - _(RawOrigin::Root, para_id); - - assert!(!RegisteredPublishers::::contains_key(para_id)); - } - - #[benchmark] - fn do_cleanup_publisher(k: Linear<1, { T::MaxStoredKeys::get() }>) { - let caller: T::AccountId = whitelisted_caller(); - let para_id = ParaId::from(2000); - let deposit = T::PublisherDeposit::get(); - - T::Currency::set_balance(&caller, deposit * 2u32.into()); - Broadcaster::::register_publisher(RawOrigin::Signed(caller).into(), para_id) - .unwrap(); - - // Publish k keys - let mut data = Vec::new(); - for i in 0..k { - let mut key = b"key_".to_vec(); - key.extend_from_slice(&i.to_be_bytes()); - data.push((key, b"value".to_vec())); - } - Broadcaster::::handle_publish(para_id, data).unwrap(); - #[block] { Broadcaster::::do_cleanup_publisher(para_id).unwrap(); diff --git a/polkadot/runtime/parachains/src/broadcaster/mod.rs b/polkadot/runtime/parachains/src/broadcaster/mod.rs index 50d7657ac5577..06db00a38e090 100644 --- a/polkadot/runtime/parachains/src/broadcaster/mod.rs +++ b/polkadot/runtime/parachains/src/broadcaster/mod.rs @@ -14,9 +14,9 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Broadcaster pallet for managing parachain data publishing and subscription. +//! Broadcaster pallet for managing parachain data publishing. //! -//! This pallet provides a publish-subscribe mechanism for parachains to efficiently share data +//! This pallet provides a publishing mechanism for parachains to efficiently share data //! through the relay chain storage using child tries per publisher. //! //! ## Publisher Registration @@ -32,7 +32,7 @@ //! ## Storage Organization //! //! 
Each publisher gets a dedicated child trie identified by `(b"pubsub", ParaId)`. The child -//! trie root is stored on-chain and can be included in storage proofs for subscribers to verify +//! trie root is stored on-chain and can be included in storage proofs to verify //! published data. //! //! ## Storage Lifecycle @@ -112,10 +112,7 @@ pub mod pallet { <::Currency as FunInspect<::AccountId>>::Balance; #[pallet::config] - pub trait Config: frame_system::Config { - /// The overarching event type. - type RuntimeEvent: From> + IsType<::RuntimeEvent>; - + pub trait Config: frame_system::Config>> { /// Currency mechanism for managing publisher deposits. type Currency: FunHoldMutate + FunMutate @@ -324,7 +321,10 @@ pub mod pallet { /// Events: /// - `DataCleanedUp` #[pallet::call_index(2)] - #[pallet::weight(T::WeightInfo::cleanup_published_data(T::MaxStoredKeys::get()))] + #[pallet::weight( + T::WeightInfo::do_cleanup_publisher(T::MaxStoredKeys::get()) + .saturating_add(T::DbWeight::get().reads(2)) + )] pub fn cleanup_published_data( origin: OriginFor, para_id: ParaId, @@ -359,7 +359,7 @@ pub mod pallet { /// Events: /// - `PublisherDeregistered` #[pallet::call_index(3)] - #[pallet::weight(T::WeightInfo::deregister_publisher())] + #[pallet::weight(T::DbWeight::get().reads_writes(2, 1))] pub fn deregister_publisher( origin: OriginFor, para_id: ParaId, @@ -393,7 +393,10 @@ pub mod pallet { /// - `DataCleanedUp` (if data existed) /// - `PublisherDeregistered` #[pallet::call_index(4)] - #[pallet::weight(T::WeightInfo::force_deregister_publisher(T::MaxStoredKeys::get()))] + #[pallet::weight( + T::WeightInfo::do_cleanup_publisher(T::MaxStoredKeys::get()) + .saturating_add(T::DbWeight::get().reads_writes(2, 1)) + )] pub fn force_deregister_publisher( origin: OriginFor, para_id: ParaId, @@ -429,6 +432,10 @@ pub mod pallet { Error::::AlreadyRegistered ); + // Enforce MaxPublishers limit at registration time + let current_count = RegisteredPublishers::::iter().count() as u32; + 
ensure!(current_count < T::MaxPublishers::get(), Error::::TooManyPublishers); + // Hold the deposit if non-zero if !deposit.is_zero() { ::Currency::hold( @@ -562,8 +569,8 @@ pub mod pallet { ); } - // Get or create child trie. This checks MaxPublishers limit on first publish. - let child_info = Self::get_or_create_publisher_child_info(origin_para_id)?; + // Get or create child trie for this publisher + let child_info = Self::get_or_create_publisher_child_info(origin_para_id); let mut published_keys = PublishedKeys::::get(origin_para_id); @@ -601,7 +608,7 @@ pub mod pallet { /// Returns the child trie root hash for a specific publisher. /// - /// The root can be included in storage proofs for subscribers to verify published data. + /// The root can be included in storage proofs to verify published data. pub fn get_publisher_child_root(para_id: ParaId) -> Option> { PublisherExists::::get(para_id).then(|| { let child_info = Self::derive_child_info(para_id); @@ -611,17 +618,13 @@ pub mod pallet { /// Gets or creates the child trie info for a publisher. /// - /// Checks the maximum publishers limit before creating a new publisher entry. - fn get_or_create_publisher_child_info(para_id: ParaId) -> Result { + /// Creates the child trie entry on first publish. The MaxPublishers limit is enforced + /// at registration time, so we don't need to check it here. + fn get_or_create_publisher_child_info(para_id: ParaId) -> ChildInfo { if !PublisherExists::::contains_key(para_id) { - let current_publisher_count = PublisherExists::::iter().count() as u32; - ensure!( - current_publisher_count < T::MaxPublishers::get(), - Error::::TooManyPublishers - ); PublisherExists::::insert(para_id, true); } - Ok(Self::derive_child_info(para_id)) + Self::derive_child_info(para_id) } /// Derives a deterministic child trie identifier from a parachain ID. 
diff --git a/polkadot/runtime/parachains/src/broadcaster/tests.rs b/polkadot/runtime/parachains/src/broadcaster/tests.rs index 297152cdab237..f9df57d650f27 100644 --- a/polkadot/runtime/parachains/src/broadcaster/tests.rs +++ b/polkadot/runtime/parachains/src/broadcaster/tests.rs @@ -420,12 +420,12 @@ fn max_publishers_limit_enforced() { // Cannot register new publisher when limit reached let new_para = ParaId::from(3000); setup_account(ALICE, 10000); - let data = vec![(b"key".to_vec(), b"value".to_vec())]; - // Registration should fail due to max publishers - // (registration checks this in get_or_create_publisher_child_info) - assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), new_para)); - assert_err!(Broadcaster::handle_publish(new_para, data), Error::::TooManyPublishers); + // Registration should fail at registration time due to MaxPublishers limit + assert_err!( + Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), new_para), + Error::::TooManyPublishers + ); // Existing publisher can still update let existing_para = ParaId::from(2000); diff --git a/polkadot/runtime/parachains/src/broadcaster/traits.rs b/polkadot/runtime/parachains/src/broadcaster/traits.rs index 10e6ea3bd3172..6d08887ead1a7 100644 --- a/polkadot/runtime/parachains/src/broadcaster/traits.rs +++ b/polkadot/runtime/parachains/src/broadcaster/traits.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Traits for publish/subscribe operations in the broadcaster pallet. +//! Traits for publish operations in the broadcaster pallet. 
use alloc::vec::Vec; use polkadot_primitives::Id as ParaId; diff --git a/polkadot/runtime/parachains/src/broadcaster/weights.rs b/polkadot/runtime/parachains/src/broadcaster/weights.rs index 702eb678f408f..65b82bf29dddf 100644 --- a/polkadot/runtime/parachains/src/broadcaster/weights.rs +++ b/polkadot/runtime/parachains/src/broadcaster/weights.rs @@ -25,9 +25,6 @@ use frame_support::weights::Weight; pub trait WeightInfo { fn register_publisher() -> Weight; fn force_register_publisher() -> Weight; - fn cleanup_published_data(k: u32) -> Weight; - fn deregister_publisher() -> Weight; - fn force_deregister_publisher(k: u32) -> Weight; fn do_cleanup_publisher(k: u32) -> Weight; } @@ -43,23 +40,6 @@ impl WeightInfo for () { .saturating_add(Weight::from_parts(0, 3000)) } - fn cleanup_published_data(k: u32) -> Weight { - Weight::from_parts(25_000_000, 0) - .saturating_add(Weight::from_parts(5_000_000, 0).saturating_mul(k.into())) - .saturating_add(Weight::from_parts(0, 5000)) - } - - fn deregister_publisher() -> Weight { - Weight::from_parts(20_000_000, 0) - .saturating_add(Weight::from_parts(0, 3000)) - } - - fn force_deregister_publisher(k: u32) -> Weight { - Weight::from_parts(30_000_000, 0) - .saturating_add(Weight::from_parts(5_000_000, 0).saturating_mul(k.into())) - .saturating_add(Weight::from_parts(0, 5000)) - } - fn do_cleanup_publisher(k: u32) -> Weight { Weight::from_parts(10_000_000, 0) .saturating_add(Weight::from_parts(5_000_000, 0).saturating_mul(k.into())) diff --git a/polkadot/runtime/parachains/src/mock.rs b/polkadot/runtime/parachains/src/mock.rs index 3c1300dc3d3ce..bd457f5a77811 100644 --- a/polkadot/runtime/parachains/src/mock.rs +++ b/polkadot/runtime/parachains/src/mock.rs @@ -230,7 +230,6 @@ parameter_types! 
{ } impl crate::broadcaster::Config for Test { - type RuntimeEvent = RuntimeEvent; type Currency = Balances; type RuntimeHoldReason = RuntimeHoldReason; type WeightInfo = (); From 65d2f3244be234f78cd3d9913fbd9ce00ddd422e Mon Sep 17 00:00:00 2001 From: metricaez Date: Wed, 10 Dec 2025 14:32:24 -0300 Subject: [PATCH 16/69] feat: rococo integration and adapter of broadcaster handler --- polkadot/runtime/parachains/src/mock.rs | 2 +- polkadot/runtime/rococo/src/lib.rs | 26 ++- polkadot/runtime/rococo/src/xcm_config.rs | 3 +- polkadot/runtime/test-runtime/src/lib.rs | 1 + .../pallet-xcm-benchmarks/src/generic/mock.rs | 1 + polkadot/xcm/xcm-builder/Cargo.toml | 4 + .../xcm/xcm-builder/src/broadcast_adapter.rs | 173 ++++++++++++++++++ polkadot/xcm/xcm-builder/src/lib.rs | 3 + 8 files changed, 210 insertions(+), 3 deletions(-) create mode 100644 polkadot/xcm/xcm-builder/src/broadcast_adapter.rs diff --git a/polkadot/runtime/parachains/src/mock.rs b/polkadot/runtime/parachains/src/mock.rs index bd457f5a77811..3c66fb3c9fb01 100644 --- a/polkadot/runtime/parachains/src/mock.rs +++ b/polkadot/runtime/parachains/src/mock.rs @@ -219,7 +219,7 @@ impl crate::shared::Config for Test { parameter_types!
{ pub const MaxPublishItems: u32 = 16; - pub const MaxKeyLength: u32 = 256; + pub const MaxKeyLength: u32 = 32; pub const MaxValueLength: u32 = 1024; pub const MaxStoredKeys: u32 = 100; pub const MaxPublishers: u32 = 1000; diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 48e30162fe1b0..6dc11a71b7052 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -66,7 +66,8 @@ use polkadot_runtime_common::{ BlockHashCount, BlockLength, SlowAdjustingFeeUpdate, }; use polkadot_runtime_parachains::{ - assigner_coretime as parachains_assigner_coretime, configuration as parachains_configuration, + assigner_coretime as parachains_assigner_coretime, broadcaster as parachains_broadcaster, + configuration as parachains_configuration, configuration::ActiveConfigHrmpChannelSizeAndCapacityRatio, coretime, disputes as parachains_disputes, disputes::slashing as parachains_slashing, @@ -1225,6 +1226,27 @@ impl parachains_slashing::Config for Runtime { type BenchmarkingConfig = parachains_slashing::BenchConfig<200>; } +parameter_types! { + pub const MaxPublishItems: u32 = 10; + pub const MaxKeyLength: u32 = 32; + pub const MaxValueLength: u32 = 1024; + pub const MaxStoredKeys: u32 = 100; + pub const MaxPublishers: u32 = 1000; + pub const PublisherDeposit: Balance = 100 * UNITS; +} + +impl parachains_broadcaster::Config for Runtime { + type Currency = Balances; + type RuntimeHoldReason = RuntimeHoldReason; + type WeightInfo = (); + type MaxPublishItems = MaxPublishItems; + type MaxKeyLength = MaxKeyLength; + type MaxValueLength = MaxValueLength; + type MaxStoredKeys = MaxStoredKeys; + type MaxPublishers = MaxPublishers; + type PublisherDeposit = PublisherDeposit; +} + parameter_types! { pub const ParaDeposit: Balance = 40 * UNITS; } @@ -1589,6 +1611,7 @@ construct_runtime! 
{ ParaSessionInfo: parachains_session_info = 61, ParasDisputes: parachains_disputes = 62, ParasSlashing: parachains_slashing = 63, + Broadcaster: parachains_broadcaster = 65, MessageQueue: pallet_message_queue = 64, OnDemandAssignmentProvider: parachains_on_demand = 66, CoretimeAssignmentProvider: parachains_assigner_coretime = 68, @@ -1853,6 +1876,7 @@ mod benches { [polkadot_runtime_common::identity_migrator, IdentityMigrator] [polkadot_runtime_common::slots, Slots] [polkadot_runtime_common::paras_registrar, Registrar] + [polkadot_runtime_parachains::broadcaster, Broadcaster] [polkadot_runtime_parachains::configuration, Configuration] [polkadot_runtime_parachains::coretime, Coretime] [polkadot_runtime_parachains::hrmp, Hrmp] diff --git a/polkadot/runtime/rococo/src/xcm_config.rs b/polkadot/runtime/rococo/src/xcm_config.rs index 5b6654438fa62..65e3931831ead 100644 --- a/polkadot/runtime/rococo/src/xcm_config.rs +++ b/polkadot/runtime/rococo/src/xcm_config.rs @@ -227,7 +227,8 @@ impl xcm_executor::Config for XcmConfig { type HrmpChannelAcceptedHandler = (); type HrmpChannelClosingHandler = (); type XcmRecorder = XcmPallet; - type BroadcastHandler = (); + type BroadcastHandler = + xcm_builder::ParachainBroadcastAdapter; } parameter_types! 
{ diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index 561c1a0769fe4..3b7d86974b157 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -591,6 +591,7 @@ impl parachains_initializer::Config for Runtime { type ForceOrigin = frame_system::EnsureRoot; type WeightInfo = (); type CoretimeOnNewSession = Coretime; + type OnNewSessionOutgoing = (); } impl parachains_session_info::Config for Runtime { diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs index 548baeb13cdb2..0dcaf39ee1ae2 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs @@ -112,6 +112,7 @@ impl xcm_executor::Config for XcmConfig { type HrmpChannelAcceptedHandler = (); type HrmpChannelClosingHandler = (); type XcmRecorder = (); + type BroadcastHandler = (); } parameter_types! { diff --git a/polkadot/xcm/xcm-builder/Cargo.toml b/polkadot/xcm/xcm-builder/Cargo.toml index 32f8f595031ef..15a395d7170d4 100644 --- a/polkadot/xcm/xcm-builder/Cargo.toml +++ b/polkadot/xcm/xcm-builder/Cargo.toml @@ -31,6 +31,8 @@ xcm-executor = { workspace = true } # Polkadot dependencies polkadot-parachain-primitives = { workspace = true } +polkadot-primitives = { workspace = true } +polkadot-runtime-parachains = { workspace = true } [dev-dependencies] pallet-assets = { workspace = true, default-features = true } @@ -71,6 +73,8 @@ std = [ "pallet-asset-conversion/std", "pallet-transaction-payment/std", "polkadot-parachain-primitives/std", + "polkadot-primitives/std", + "polkadot-runtime-parachains/std", "primitive-types/std", "scale-info/std", "sp-arithmetic/std", diff --git a/polkadot/xcm/xcm-builder/src/broadcast_adapter.rs b/polkadot/xcm/xcm-builder/src/broadcast_adapter.rs new file mode 100644 index 0000000000000..404159c7e36b7 --- /dev/null +++ 
b/polkadot/xcm/xcm-builder/src/broadcast_adapter.rs @@ -0,0 +1,173 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Adapters for broadcast/publish operations in XCM. + +use alloc::vec::Vec; +use core::marker::PhantomData; +use frame_support::traits::Contains; +use polkadot_primitives::Id as ParaId; +use polkadot_runtime_parachains::broadcaster::Publish; +use xcm::latest::prelude::XcmError; +use xcm::latest::{Junction, Location, PublishData, Result as XcmResult}; +use xcm_executor::traits::BroadcastHandler; + +/// Configurable broadcast adapter that validates parachain origins. 
+pub struct ParachainBroadcastAdapter(PhantomData<(Filter, Handler)>); + +impl BroadcastHandler for ParachainBroadcastAdapter +where + Filter: Contains, + Handler: Publish, +{ + fn handle_publish(origin: &Location, data: PublishData) -> XcmResult { + // Check if origin is authorized to publish + if !Filter::contains(origin) { + return Err(XcmError::NoPermission); + } + + // Extract parachain ID from authorized origin + let para_id = match origin.unpack() { + (0, [Junction::Parachain(id)]) => ParaId::from(*id), // Direct parachain + (1, [Junction::Parachain(id), ..]) => ParaId::from(*id), // Sibling parachain + _ => return Err(XcmError::BadOrigin), // Should be caught by filter + }; + + // Call the actual handler + let data_vec: Vec<(Vec, Vec)> = data + .into_inner() + .into_iter() + .map(|(k, v)| (k.into_inner(), v.into_inner())) + .collect(); + Handler::publish_data(para_id, data_vec).map_err(|_| XcmError::PublishFailed) + } +} + +/// Allows only direct parachains (parents=0, interior=[Parachain(_)]). +pub struct OnlyParachains; +impl Contains for OnlyParachains { + fn contains(origin: &Location) -> bool { + matches!(origin.unpack(), (0, [Junction::Parachain(_)])) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use frame_support::parameter_types; + use polkadot_runtime_parachains::broadcaster::Publish; + use sp_runtime::BoundedVec; + use xcm::latest::prelude::XcmError; + use xcm::latest::{ + Junction, Location, MaxPublishKeyLength, MaxPublishValueLength, PublishData, + }; + + // Mock handler that tracks calls + parameter_types! 
{ + pub static PublishCalls: Vec<(ParaId, Vec<(Vec, Vec)>)> = vec![]; + } + + // Helper to create test publish data + fn test_publish_data(items: Vec<(&[u8], &[u8])>) -> PublishData { + items + .into_iter() + .map(|(k, v)| { + ( + BoundedVec::::try_from(k.to_vec()).unwrap(), + BoundedVec::::try_from(v.to_vec()).unwrap(), + ) + }) + .collect::>() + .try_into() + .unwrap() + } + + struct MockPublishHandler; + impl Publish for MockPublishHandler { + fn publish_data( + publisher: ParaId, + data: Vec<(Vec, Vec)>, + ) -> Result<(), sp_runtime::DispatchError> { + let mut calls = PublishCalls::get(); + calls.push((publisher, data)); + PublishCalls::set(calls); + Ok(()) + } + } + + #[test] + fn publish_from_direct_parachain_works() { + PublishCalls::set(vec![]); + let origin = Location::new(0, [Junction::Parachain(1000)]); + let data = test_publish_data(vec![(b"key1", b"value1")]); + + let result = ParachainBroadcastAdapter::::handle_publish( + &origin, + data.clone(), + ); + + assert!(result.is_ok()); + let calls = PublishCalls::get(); + assert_eq!(calls.len(), 1); + assert_eq!(calls[0].0, ParaId::from(1000)); + assert_eq!(calls[0].1, vec![(b"key1".to_vec(), b"value1".to_vec())]); + } + + #[test] + fn publish_from_sibling_parachain_fails() { + PublishCalls::set(vec![]); + let origin = Location::new( + 1, + [Junction::Parachain(2000), Junction::AccountId32 { network: None, id: [1; 32] }], + ); + let data = test_publish_data(vec![(b"key1", b"value1")]); + + let result = ParachainBroadcastAdapter::::handle_publish( + &origin, + data.clone(), + ); + + assert!(matches!(result, Err(XcmError::NoPermission))); + assert!(PublishCalls::get().is_empty()); + } + + #[test] + fn publish_from_non_parachain_fails() { + PublishCalls::set(vec![]); + let origin = Location::here(); + let data = test_publish_data(vec![(b"key1", b"value1")]); + + let result = + ParachainBroadcastAdapter::::handle_publish( + &origin, data, + ); + + assert!(matches!(result, Err(XcmError::NoPermission))); + 
assert!(PublishCalls::get().is_empty()); + } + + #[test] + fn only_parachains_filter_works() { + // Direct parachain allowed + assert!(OnlyParachains::contains(&Location::new(0, [Junction::Parachain(1000)]))); + + // Sibling parachain not allowed + assert!(!OnlyParachains::contains(&Location::new(1, [Junction::Parachain(1000)]))); + + // Root not allowed + assert!(!OnlyParachains::contains(&Location::here())); + } +} diff --git a/polkadot/xcm/xcm-builder/src/lib.rs b/polkadot/xcm/xcm-builder/src/lib.rs index 83fb34bd6569f..d01628f4ea20d 100644 --- a/polkadot/xcm/xcm-builder/src/lib.rs +++ b/polkadot/xcm/xcm-builder/src/lib.rs @@ -49,6 +49,9 @@ pub use barriers::{ TakeWeightCredit, TrailingSetTopicAsId, WithComputedOrigin, }; +mod broadcast_adapter; +pub use broadcast_adapter::{OnlyParachains, ParachainBroadcastAdapter}; + mod controller; pub use controller::{ Controller, ExecuteController, ExecuteControllerWeightInfo, QueryController, From ad04ed848d71dd0e04dcad8744b3f72d034705e1 Mon Sep 17 00:00:00 2001 From: metricaez Date: Wed, 10 Dec 2025 15:55:32 -0300 Subject: [PATCH 17/69] feat: test utils and generic xcm benchmark fix --- .../src/generic/benchmarking.rs | 6 ++ .../pallet-xcm-benchmarks/src/generic/mock.rs | 4 +- pubsub-dev/build.sh | 56 +++++++++++++++++++ pubsub-dev/zombienet.toml | 44 +++++++++++++++ 4 files changed, 108 insertions(+), 2 deletions(-) create mode 100755 pubsub-dev/build.sh create mode 100644 pubsub-dev/zombienet.toml diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs index 199543f4efa76..1f7eca3a73426 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs @@ -964,6 +964,7 @@ mod benchmarks { #[benchmark] fn publish(n: Linear<1, { MaxPublishItems::get() }>) -> Result<(), BenchmarkError> { use xcm::latest::{MaxPublishKeyLength, MaxPublishValueLength}; + use 
xcm_builder::test_utils::PublishedData; // The `Publish` instruction weight scales with the number of items published. // Each item is benchmarked at maximum key and value lengths to represent worst-case @@ -994,6 +995,11 @@ mod benchmarks { executor.bench_process(xcm)?; } + // Verify data was published (origin is Parachain(1000) from mock) + let published = PublishedData::get(); + assert!(published.contains_key(&1000)); + assert!(!published.get(&1000).unwrap().is_empty()); + Ok(()) } diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs index 0dcaf39ee1ae2..f7984782cfac2 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs @@ -26,7 +26,7 @@ use sp_runtime::traits::TrailingZeroInput; use xcm_builder::{ test_utils::{ AssetsInHolding, TestAssetExchanger, TestAssetLocker, TestAssetTrap, - TestSubscriptionService, TestUniversalAliases, + TestBroadcastHandler, TestSubscriptionService, TestUniversalAliases, }, AliasForeignAccountId32, AllowUnpaidExecutionFrom, EnsureDecodableXcm, FrameTransactionalProcessor, @@ -112,7 +112,7 @@ impl xcm_executor::Config for XcmConfig { type HrmpChannelAcceptedHandler = (); type HrmpChannelClosingHandler = (); type XcmRecorder = (); - type BroadcastHandler = (); + type BroadcastHandler = TestBroadcastHandler; } parameter_types! { diff --git a/pubsub-dev/build.sh b/pubsub-dev/build.sh new file mode 100755 index 0000000000000..dd968f1856fe1 --- /dev/null +++ b/pubsub-dev/build.sh @@ -0,0 +1,56 @@ +#!/bin/bash + +export DYLD_LIBRARY_PATH=/Library/Developer/CommandLineTools/usr/lib + +echo "🔨 Building Polkadot SDK binaries for pubsub XCM v5 testing..." +echo + +# Build main polkadot binary +echo "📦 Building polkadot relay chain binary..." +cargo build --release -p polkadot --bin polkadot +if [ $? 
-ne 0 ]; then + echo "❌ Failed to build polkadot binary" + exit 1 +fi +echo "✅ polkadot binary built successfully" +echo + +# Build PVF execute worker +echo "📦 Building polkadot-execute-worker..." +cargo build --release -p polkadot --bin polkadot-execute-worker +if [ $? -ne 0 ]; then + echo "❌ Failed to build polkadot-execute-worker" + exit 1 +fi +echo "✅ polkadot-execute-worker built successfully" +echo + +# Build PVF prepare worker +echo "📦 Building polkadot-prepare-worker..." +cargo build --release -p polkadot --bin polkadot-prepare-worker +if [ $? -ne 0 ]; then + echo "❌ Failed to build polkadot-prepare-worker" + exit 1 +fi +echo "✅ polkadot-prepare-worker built successfully" +echo + +# Build parachain binary +echo "📦 Building polkadot-parachain binary..." +cargo build --release -p polkadot-parachain-bin --bin polkadot-parachain +if [ $? -ne 0 ]; then + echo "❌ Failed to build polkadot-parachain binary" + exit 1 +fi +echo "✅ polkadot-parachain binary built successfully" +echo + +echo "🎉 All binaries built successfully!" +echo +echo "📍 Binary locations:" +echo " - Relay chain: target/release/polkadot" +echo " - Execute worker: target/release/polkadot-execute-worker" +echo " - Prepare worker: target/release/polkadot-prepare-worker" +echo " - Parachain: target/release/polkadot-parachain" +echo +echo "🚀 Ready for zombienet testing!" 
\ No newline at end of file diff --git a/pubsub-dev/zombienet.toml b/pubsub-dev/zombienet.toml new file mode 100644 index 0000000000000..840e8026c5408 --- /dev/null +++ b/pubsub-dev/zombienet.toml @@ -0,0 +1,44 @@ +[relaychain] +chain = "rococo-local" +default_command = "./target/release/polkadot" +default_args = [ "-lparachain=debug", "-lxcm=trace" ] + + [[relaychain.nodes]] + name = "alice" + rpc_port = 9900 + validator = true + args = ["--trie-cache-size=0", "--disable-worker-version-check"] + + [[relaychain.nodes]] + name = "bob" + validator = true + args = ["--trie-cache-size=0", "--disable-worker-version-check"] + + [[relaychain.nodes]] + name = "charlie" + validator = true + args = ["--trie-cache-size=0", "--disable-worker-version-check"] + + [[relaychain.nodes]] + name = "dave" + validator = true + args = ["--trie-cache-size=0", "--disable-worker-version-check"] + +[[parachains]] +id = 1000 +addToGenesis = true +cumulus_based = true + + [[parachains.collators]] + name = "para-collator01" + rpc_port = 9920 + chain = "penpal-local" + command = "./target/release/polkadot-parachain" + args = ["-lxcm=trace", "--trie-cache-size=0", "--network-backend=libp2p", "--", "--network-backend=libp2p"] + + [[parachains.collators]] + name = "para-collator02" + chain = "penpal-local" + command = "./target/release/polkadot-parachain" + args = ["-lxcm=trace", "--trie-cache-size=0", "--network-backend=libp2p", "--", "--network-backend=libp2p"] + From 96b007c0a4ec2489f915c9b4c76e3534f9cee22b Mon Sep 17 00:00:00 2001 From: metricaez Date: Wed, 10 Dec 2025 18:30:24 -0300 Subject: [PATCH 18/69] chore: better order and rococo integration of initializer --- polkadot/runtime/parachains/src/broadcaster/mod.rs | 6 +++--- polkadot/runtime/rococo/src/lib.rs | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/polkadot/runtime/parachains/src/broadcaster/mod.rs b/polkadot/runtime/parachains/src/broadcaster/mod.rs index 06db00a38e090..7f1ad095893a9 100644 ---
a/polkadot/runtime/parachains/src/broadcaster/mod.rs +++ b/polkadot/runtime/parachains/src/broadcaster/mod.rs @@ -569,9 +569,6 @@ pub mod pallet { ); } - // Get or create child trie for this publisher - let child_info = Self::get_or_create_publisher_child_info(origin_para_id); - let mut published_keys = PublishedKeys::::get(origin_para_id); // Count new unique keys to prevent exceeding MaxStoredKeys @@ -590,6 +587,9 @@ pub mod pallet { Error::::TooManyStoredKeys ); + // Get or create child trie for this publisher + let child_info = Self::get_or_create_publisher_child_info(origin_para_id); + // Write to child trie and track keys for enumeration for (key, value) in data { frame_support::storage::child::put(&child_info, &key, &value); diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 6dc11a71b7052..9ad62af1be23b 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -1199,7 +1199,7 @@ impl parachains_initializer::Config for Runtime { type ForceOrigin = EnsureRoot; type WeightInfo = weights::polkadot_runtime_parachains_initializer::WeightInfo; type CoretimeOnNewSession = Coretime; - type OnNewSessionOutgoing = (); + type OnNewSessionOutgoing = Broadcaster; } impl parachains_disputes::Config for Runtime { From 2fe1e8044b9aab917045cb314b05668029b5ca97 Mon Sep 17 00:00:00 2001 From: metricaez Date: Thu, 11 Dec 2025 12:06:30 -0300 Subject: [PATCH 19/69] feat: fix benchmark generic --- .../xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs index 1f7eca3a73426..199543f4efa76 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs @@ -964,7 +964,6 @@ mod benchmarks { #[benchmark] fn publish(n: Linear<1, { MaxPublishItems::get() 
}>) -> Result<(), BenchmarkError> { use xcm::latest::{MaxPublishKeyLength, MaxPublishValueLength}; - use xcm_builder::test_utils::PublishedData; // The `Publish` instruction weight scales with the number of items published. // Each item is benchmarked at maximum key and value lengths to represent worst-case @@ -995,11 +994,6 @@ mod benchmarks { executor.bench_process(xcm)?; } - // Verify data was published (origin is Parachain(1000) from mock) - let published = PublishedData::get(); - assert!(published.contains_key(&1000)); - assert!(!published.get(&1000).unwrap().is_empty()); - Ok(()) } From e153ab38ebed5d9b9147f6d27fed314736770f10 Mon Sep 17 00:00:00 2001 From: metricaez Date: Thu, 11 Dec 2025 12:48:04 -0300 Subject: [PATCH 20/69] feat: KeyToIncludeInRelayProofApi --- Cargo.lock | 1 + cumulus/client/consensus/aura/src/collator.rs | 4 + .../consensus/aura/src/collators/basic.rs | 1 + .../consensus/aura/src/collators/lookahead.rs | 15 ++- .../consensus/aura/src/collators/mod.rs | 32 ++++++- .../slot_based/block_builder_task.rs | 2 +- .../aura/src/collators/slot_based/tests.rs | 9 ++ cumulus/client/parachain-inherent/src/lib.rs | 91 ++++++++++++++++++- .../src/lib.rs | 12 +++ .../client/relay-chain-interface/Cargo.toml | 1 + .../client/relay-chain-interface/src/lib.rs | 18 ++++ .../relay-chain-rpc-interface/src/lib.rs | 14 +++ .../polkadot-omni-node/lib/src/common/aura.rs | 2 + .../lib/src/fake_runtime_api/utils.rs | 7 ++ cumulus/primitives/core/src/lib.rs | 40 ++++++++ cumulus/test/runtime/src/lib.rs | 6 ++ 16 files changed, 248 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a959ef0c98433..3ac99dd9da2ab 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4870,6 +4870,7 @@ dependencies = [ "sp-api", "sp-blockchain", "sp-state-machine", + "sp-storage 19.0.0", "sp-version", "thiserror 1.0.65", ] diff --git a/cumulus/client/consensus/aura/src/collator.rs b/cumulus/client/consensus/aura/src/collator.rs index 3999352322e20..2fab2f7e424d9 100644 --- 
a/cumulus/client/consensus/aura/src/collator.rs +++ b/cumulus/client/consensus/aura/src/collator.rs @@ -177,6 +177,7 @@ where parent_hash: Block::Hash, timestamp: impl Into>, relay_parent_descendants: Option, + relay_proof_request: cumulus_primitives_core::RelayProofRequest, collator_peer_id: PeerId, ) -> Result<(ParachainInherentData, InherentData), Box> { let paras_inherent_data = ParachainInherentDataProvider::create_at( @@ -188,6 +189,7 @@ where .map(RelayParentData::into_inherent_descendant_list) .unwrap_or_default(), Vec::new(), + relay_proof_request, collator_peer_id, ) .await; @@ -224,6 +226,7 @@ where validation_data: &PersistedValidationData, parent_hash: Block::Hash, timestamp: impl Into>, + relay_proof_request: cumulus_primitives_core::RelayProofRequest, collator_peer_id: PeerId, ) -> Result<(ParachainInherentData, InherentData), Box> { self.create_inherent_data_with_rp_offset( @@ -232,6 +235,7 @@ where parent_hash, timestamp, None, + relay_proof_request, collator_peer_id, ) .await diff --git a/cumulus/client/consensus/aura/src/collators/basic.rs b/cumulus/client/consensus/aura/src/collators/basic.rs index 1f99e2f6e5cc0..532da7ede18e3 100644 --- a/cumulus/client/consensus/aura/src/collators/basic.rs +++ b/cumulus/client/consensus/aura/src/collators/basic.rs @@ -238,6 +238,7 @@ where &validation_data, parent_hash, claim.timestamp(), + Default::default(), params.collator_peer_id, ) .await diff --git a/cumulus/client/consensus/aura/src/collators/lookahead.rs b/cumulus/client/consensus/aura/src/collators/lookahead.rs index 303b5268095c5..7d682ce0811f8 100644 --- a/cumulus/client/consensus/aura/src/collators/lookahead.rs +++ b/cumulus/client/consensus/aura/src/collators/lookahead.rs @@ -164,8 +164,10 @@ where + Send + Sync + 'static, - Client::Api: - AuraApi + CollectCollationInfo + AuraUnincludedSegmentApi, + Client::Api: AuraApi + + CollectCollationInfo + + AuraUnincludedSegmentApi + + cumulus_primitives_core::KeyToIncludeInRelayProofApi, Backend: 
sc_client_api::Backend + 'static, RClient: RelayChainInterface + Clone + 'static, CIDP: CreateInherentDataProviders + 'static, @@ -216,8 +218,10 @@ where + Send + Sync + 'static, - Client::Api: - AuraApi + CollectCollationInfo + AuraUnincludedSegmentApi, + Client::Api: AuraApi + + CollectCollationInfo + + AuraUnincludedSegmentApi + + cumulus_primitives_core::KeyToIncludeInRelayProofApi, Backend: sc_client_api::Backend + 'static, RClient: RelayChainInterface + Clone + 'static, CIDP: CreateInherentDataProviders + 'static, @@ -392,12 +396,15 @@ where // Build and announce collations recursively until // `can_build_upon` fails or building a collation fails. + let relay_proof_request = super::get_relay_proof_request(&*params.para_client, parent_hash); + let (parachain_inherent_data, other_inherent_data) = match collator .create_inherent_data( relay_parent, &validation_data, parent_hash, slot_claim.timestamp(), + relay_proof_request, params.collator_peer_id, ) .await diff --git a/cumulus/client/consensus/aura/src/collators/mod.rs b/cumulus/client/consensus/aura/src/collators/mod.rs index d938dca69282f..0b2981691fc24 100644 --- a/cumulus/client/consensus/aura/src/collators/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/mod.rs @@ -25,7 +25,9 @@ use crate::collator::SlotClaim; use codec::Codec; use cumulus_client_consensus_common::{self as consensus_common, ParentSearchParams}; use cumulus_primitives_aura::{AuraUnincludedSegmentApi, Slot}; -use cumulus_primitives_core::{relay_chain::Header as RelayHeader, BlockT}; +use cumulus_primitives_core::{ + relay_chain::Header as RelayHeader, BlockT, KeyToIncludeInRelayProofApi, +}; use cumulus_relay_chain_interface::{OverseerHandle, RelayChainInterface}; use polkadot_node_subsystem::messages::{CollatorProtocolMessage, RuntimeApiRequest}; use polkadot_node_subsystem_util::runtime::ClaimQueueSnapshot; @@ -662,6 +664,34 @@ mod tests { } } +/// Fetches relay chain storage proof requests from the parachain runtime. 
+/// +/// Queries the runtime API to determine which relay chain storage keys +/// (both top-level and child trie keys) should be included in the relay chain state proof. +/// +/// Falls back to an empty request if the runtime API call fails or is not implemented. +fn get_relay_proof_request( + client: &Client, + parent_hash: Block::Hash, +) -> cumulus_primitives_core::RelayProofRequest +where + Block: BlockT, + Client: ProvideRuntimeApi, + Client::Api: KeyToIncludeInRelayProofApi, +{ + client + .runtime_api() + .keys_to_prove(parent_hash) + .unwrap_or_else(|e| { + tracing::warn!( + target: crate::LOG_TARGET, + error = ?e, + "Failed to fetch relay proof requests from runtime, using empty request" + ); + Default::default() + }) +} + /// Holds a relay parent and its descendants. pub struct RelayParentData { /// The relay parent block header diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index 4a58ed81426af..173550d995b63 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -359,7 +359,6 @@ where relay_parent_storage_root: *relay_parent_header.state_root(), max_pov_size: *max_pov_size, }; - let (parachain_inherent_data, other_inherent_data) = match collator .create_inherent_data_with_rp_offset( relay_parent, @@ -367,6 +366,7 @@ where parent_hash, slot_claim.timestamp(), Some(rp_data), + Default::default(), collator_peer_id, ) .await diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/tests.rs b/cumulus/client/consensus/aura/src/collators/slot_based/tests.rs index e0ba35e558afe..ef4ed09c6dc66 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/tests.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/tests.rs @@ -566,6 +566,15 @@ impl RelayChainInterface for TestRelayClient { 
unimplemented!("Not needed for test") } + async fn prove_child_read( + &self, + _: RelayHash, + _: &cumulus_relay_chain_interface::ChildInfo, + _: &[Vec], + ) -> RelayChainResult { + unimplemented!("Not needed for test") + } + async fn wait_for_block(&self, _: RelayHash) -> RelayChainResult<()> { unimplemented!("Not needed for test") } diff --git a/cumulus/client/parachain-inherent/src/lib.rs b/cumulus/client/parachain-inherent/src/lib.rs index 5e994cd472f70..9e96446b34d96 100644 --- a/cumulus/client/parachain-inherent/src/lib.rs +++ b/cumulus/client/parachain-inherent/src/lib.rs @@ -30,6 +30,8 @@ pub use cumulus_primitives_parachain_inherent::{ParachainInherentData, INHERENT_ use cumulus_relay_chain_interface::RelayChainInterface; pub use mock::{MockValidationDataInherentDataProvider, MockXcmConfig}; use sc_network_types::PeerId; +use sp_state_machine::StorageProof; +use sp_storage::ChildInfo; const LOG_TARGET: &str = "parachain-inherent"; @@ -157,6 +159,84 @@ async fn collect_relay_storage_proof( .ok() } +/// Collect storage proofs for relay chain data. +/// +/// Generates proofs for both top-level relay chain storage and child trie data. +/// Top-level keys are proven directly. Child trie roots are automatically included +/// from their standard storage locations (`:child_storage:default:` + identifier). +/// +/// Returns a merged proof combining all requested data, or `None` if there are no requests. 
+async fn collect_relay_storage_proofs( + relay_chain_interface: &impl RelayChainInterface, + relay_parent: PHash, + relay_proof_request: cumulus_primitives_core::RelayProofRequest, +) -> Option { + use cumulus_primitives_core::RelayStorageKey; + + let cumulus_primitives_core::RelayProofRequest { keys } = relay_proof_request; + + if keys.is_empty() { + return None; + } + + let mut combined_proof: Option = None; + + // Group keys by storage type + let mut top_keys = Vec::new(); + let mut child_keys: std::collections::BTreeMap, Vec>> = + std::collections::BTreeMap::new(); + + for key in keys { + match key { + RelayStorageKey::Top(k) => top_keys.push(k), + RelayStorageKey::Child { info, key } => { + child_keys.entry(info).or_default().push(key); + }, + } + } + + // Collect top-level storage proofs + if !top_keys.is_empty() { + match relay_chain_interface.prove_read(relay_parent, &top_keys).await { + Ok(top_proof) => { + combined_proof = Some(top_proof); + }, + Err(e) => { + tracing::error!( + target: LOG_TARGET, + relay_parent = ?relay_parent, + error = ?e, + "Cannot obtain top-level storage proof from relay chain.", + ); + }, + } + } + + // Collect child trie proofs + for (storage_key, data_keys) in child_keys { + let child_info = ChildInfo::new_default(&storage_key); + match relay_chain_interface.prove_child_read(relay_parent, &child_info, &data_keys).await { + Ok(child_proof) => { + combined_proof = match combined_proof { + None => Some(child_proof), + Some(existing) => Some(StorageProof::merge([existing, child_proof])), + }; + }, + Err(e) => { + tracing::error!( + target: LOG_TARGET, + relay_parent = ?relay_parent, + child_trie_id = ?child_info.storage_key(), + error = ?e, + "Cannot obtain child trie proof from relay chain.", + ); + }, + } + } + + combined_proof +} + pub struct ParachainInherentDataProvider; impl ParachainInherentDataProvider { @@ -170,6 +250,7 @@ impl ParachainInherentDataProvider { para_id: ParaId, relay_parent_descendants: Vec, 
additional_relay_state_keys: Vec>, + relay_proof_request: cumulus_primitives_core::RelayProofRequest, collator_peer_id: PeerId, ) -> Option { let collator_peer_id = ApprovedPeerId::try_from(collator_peer_id.to_bytes()) @@ -188,7 +269,7 @@ impl ParachainInherentDataProvider { .iter() .skip(1) .any(sc_consensus_babe::contains_epoch_change::); - let relay_chain_state = collect_relay_storage_proof( + let mut relay_chain_state = collect_relay_storage_proof( relay_chain_interface, para_id, relay_parent, @@ -198,6 +279,14 @@ impl ParachainInherentDataProvider { ) .await?; + // Collect additional requested storage proofs (top-level and child tries) + if let Some(additional_proofs) = + collect_relay_storage_proofs(relay_chain_interface, relay_parent, relay_proof_request) + .await + { + relay_chain_state = StorageProof::merge([relay_chain_state, additional_proofs]); + } + let downward_messages = relay_chain_interface .retrieve_dmq_contents(para_id, relay_parent) .await diff --git a/cumulus/client/relay-chain-inprocess-interface/src/lib.rs b/cumulus/client/relay-chain-inprocess-interface/src/lib.rs index b989f81efd5dc..f7b3f810b6015 100644 --- a/cumulus/client/relay-chain-inprocess-interface/src/lib.rs +++ b/cumulus/client/relay-chain-inprocess-interface/src/lib.rs @@ -240,6 +240,18 @@ impl RelayChainInterface for RelayChainInProcessInterface { .map_err(RelayChainError::StateMachineError) } + async fn prove_child_read( + &self, + relay_parent: PHash, + child_info: &cumulus_relay_chain_interface::ChildInfo, + child_keys: &[Vec], + ) -> RelayChainResult { + let state_backend = self.backend.state_at(relay_parent, TrieCacheContext::Untrusted)?; + + sp_state_machine::prove_child_read(state_backend, child_info, child_keys) + .map_err(RelayChainError::StateMachineError) + } + /// Wait for a given relay chain block in an async way. 
/// /// The caller needs to pass the hash of a block it waits for and the function will return when diff --git a/cumulus/client/relay-chain-interface/Cargo.toml b/cumulus/client/relay-chain-interface/Cargo.toml index be19f99526659..db89a573b3537 100644 --- a/cumulus/client/relay-chain-interface/Cargo.toml +++ b/cumulus/client/relay-chain-interface/Cargo.toml @@ -21,6 +21,7 @@ sc-network = { workspace = true, default-features = true } sp-api = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-state-machine = { workspace = true, default-features = true } +sp-storage = { workspace = true, default-features = true } sp-version = { workspace = true } async-trait = { workspace = true } diff --git a/cumulus/client/relay-chain-interface/src/lib.rs b/cumulus/client/relay-chain-interface/src/lib.rs index dd03738ed0029..8f87ccc6997b2 100644 --- a/cumulus/client/relay-chain-interface/src/lib.rs +++ b/cumulus/client/relay-chain-interface/src/lib.rs @@ -42,6 +42,7 @@ pub use cumulus_primitives_core::{ }; pub use polkadot_overseer::Handle as OverseerHandle; pub use sp_state_machine::StorageValue; +pub use sp_storage::ChildInfo; pub type RelayChainResult = Result; @@ -213,6 +214,14 @@ pub trait RelayChainInterface: Send + Sync { relevant_keys: &Vec>, ) -> RelayChainResult; + /// Generate a child trie storage read proof. + async fn prove_child_read( + &self, + relay_parent: PHash, + child_info: &ChildInfo, + child_keys: &[Vec], + ) -> RelayChainResult; + /// Returns the validation code hash for the given `para_id` using the given /// `occupied_core_assumption`. 
async fn validation_code_hash( @@ -354,6 +363,15 @@ where (**self).prove_read(relay_parent, relevant_keys).await } + async fn prove_child_read( + &self, + relay_parent: PHash, + child_info: &ChildInfo, + child_keys: &[Vec], + ) -> RelayChainResult { + (**self).prove_child_read(relay_parent, child_info, child_keys).await + } + async fn wait_for_block(&self, hash: PHash) -> RelayChainResult<()> { (**self).wait_for_block(hash).await } diff --git a/cumulus/client/relay-chain-rpc-interface/src/lib.rs b/cumulus/client/relay-chain-rpc-interface/src/lib.rs index 84d22676789cf..9c7732e6e452e 100644 --- a/cumulus/client/relay-chain-rpc-interface/src/lib.rs +++ b/cumulus/client/relay-chain-rpc-interface/src/lib.rs @@ -210,6 +210,20 @@ impl RelayChainInterface for RelayChainRpcInterface { }) } + async fn prove_child_read( + &self, + _relay_parent: RelayHash, + _child_info: &cumulus_relay_chain_interface::ChildInfo, + _child_keys: &[Vec], + ) -> RelayChainResult { + // Not implemented: requires relay chain RPC to expose child trie proof method. + tracing::warn!( + target: "relay-chain-rpc-interface", + "prove_child_read not implemented for RPC interface, returning empty proof" + ); + Ok(StorageProof::empty()) + } + /// Wait for a given relay chain block /// /// The hash of the block to wait for is passed. We wait for the block to arrive or return after diff --git a/cumulus/polkadot-omni-node/lib/src/common/aura.rs b/cumulus/polkadot-omni-node/lib/src/common/aura.rs index 9ca725ff3279a..49630ae33033e 100644 --- a/cumulus/polkadot-omni-node/lib/src/common/aura.rs +++ b/cumulus/polkadot-omni-node/lib/src/common/aura.rs @@ -53,6 +53,7 @@ pub trait AuraRuntimeApi: sp_api::ApiExt + AuraApi::Public> + AuraUnincludedSegmentApi + + cumulus_primitives_core::KeyToIncludeInRelayProofApi + Sized { /// Check if the runtime has the Aura API. 
@@ -66,5 +67,6 @@ impl AuraRuntimeApi for T wher T: sp_api::ApiExt + AuraApi::Public> + AuraUnincludedSegmentApi + + cumulus_primitives_core::KeyToIncludeInRelayProofApi { } diff --git a/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs b/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs index 56eb3d2ae7602..9d60924f55b84 100644 --- a/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs +++ b/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs @@ -175,6 +175,13 @@ macro_rules! impl_node_runtime_apis { unimplemented!() } } + + impl cumulus_primitives_core::KeyToIncludeInRelayProofApi<$block> for $runtime { + fn keys_to_prove() -> cumulus_primitives_core::RelayProofRequest { + unimplemented!() + } + } + #[cfg(feature = "try-runtime")] impl frame_try_runtime::TryRuntime<$block> for $runtime { fn on_runtime_upgrade( diff --git a/cumulus/primitives/core/src/lib.rs b/cumulus/primitives/core/src/lib.rs index 774961b6b7e6b..9ebc92daa6ffb 100644 --- a/cumulus/primitives/core/src/lib.rs +++ b/cumulus/primitives/core/src/lib.rs @@ -466,6 +466,30 @@ pub struct CollationInfo { pub head_data: HeadData, } +/// A relay chain storage key to be included in the storage proof. +#[derive(Clone, Debug, Encode, Decode, TypeInfo, PartialEq, Eq)] +pub enum RelayStorageKey { + /// Top-level relay chain storage key. + Top(Vec), + /// Child trie storage key. + Child { + /// Child trie storage key (unprefixed). + info: Vec, + /// Key within the child trie. + key: Vec, + }, +} + +/// Request for proving relay chain storage data. +/// +/// Contains a list of storage keys (either top-level or child trie keys) +/// to be included in the relay chain state proof. +#[derive(Clone, Debug, Encode, Decode, TypeInfo, PartialEq, Eq, Default)] +pub struct RelayProofRequest { + /// Storage keys to include in the relay chain state proof. + pub keys: Vec, +} + sp_api::decl_runtime_apis! { /// Runtime api to collect information about a collation. 
/// @@ -513,4 +537,20 @@ sp_api::decl_runtime_apis! { /// Returns the target number of blocks per relay chain slot. fn target_block_rate() -> u32; } + + /// API for specifying which relay chain storage data to include in storage proofs. + /// + /// This API allows parachains to request both top-level relay chain storage keys + /// and child trie storage keys to be included in the relay chain state proof. + pub trait KeyToIncludeInRelayProofApi { + /// Returns relay chain storage proof requests. + /// + /// The returned `RelayProofRequest` holds a single `keys` list, where each + /// entry is either a `RelayStorageKey::Top` (top-level relay chain key) or a + /// `RelayStorageKey::Child` (child trie identifier plus the key within it). + /// + /// The collator generates proofs for these and includes them in the relay chain state proof. + fn keys_to_prove() -> RelayProofRequest; + } } diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index dfe37e10d05d7..e191569a23127 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -641,6 +641,12 @@ impl_runtime_apis! { 1 } } + + impl cumulus_primitives_core::KeyToIncludeInRelayProofApi for Runtime { + fn keys_to_prove() -> cumulus_primitives_core::RelayProofRequest { + Default::default() + } + } } cumulus_pallet_parachain_system::register_validate_block!
{ From b01266aa8fb81f81d76a7c7464efe62fe8a881f5 Mon Sep 17 00:00:00 2001 From: metricaez Date: Thu, 11 Dec 2025 15:36:27 -0300 Subject: [PATCH 21/69] feat: ProcessChildTrieData on parachain-system and pallet Subscriber --- Cargo.lock | 14 ++ Cargo.toml | 2 + cumulus/pallets/aura-ext/src/test.rs | 1 + cumulus/pallets/parachain-system/src/lib.rs | 9 + .../src/relay_state_snapshot.rs | 29 +++ cumulus/pallets/subscriber/Cargo.toml | 37 +++ cumulus/pallets/subscriber/src/lib.rs | 213 ++++++++++++++++++ cumulus/pallets/xcmp-queue/src/mock.rs | 1 + .../assets/asset-hub-rococo/src/lib.rs | 1 + .../assets/asset-hub-westend/src/lib.rs | 1 + .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 1 + .../bridge-hubs/bridge-hub-westend/src/lib.rs | 1 + .../collectives-westend/src/lib.rs | 1 + .../coretime/coretime-westend/src/lib.rs | 1 + .../glutton/glutton-westend/src/lib.rs | 1 + .../runtimes/people/people-westend/src/lib.rs | 1 + .../runtimes/testing/penpal/src/lib.rs | 1 + .../testing/yet-another-parachain/src/lib.rs | 1 + cumulus/test/runtime/src/lib.rs | 1 + 19 files changed, 317 insertions(+) create mode 100644 cumulus/pallets/subscriber/Cargo.toml create mode 100644 cumulus/pallets/subscriber/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 3ac99dd9da2ab..1e81b8f54956e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4640,6 +4640,20 @@ dependencies = [ "sp-runtime", ] +[[package]] +name = "cumulus-pallet-subscriber" +version = "0.1.0" +dependencies = [ + "cumulus-pallet-parachain-system", + "cumulus-primitives-core", + "frame-support", + "frame-system", + "parity-scale-codec", + "scale-info", + "sp-core 28.0.0", + "sp-std 14.0.0", +] + [[package]] name = "cumulus-pallet-weight-reclaim" version = "1.0.0" diff --git a/Cargo.toml b/Cargo.toml index 2c7f87d573f14..eb8af69d9af4d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -85,6 +85,7 @@ members = [ "cumulus/pallets/parachain-system/proc-macro", "cumulus/pallets/session-benchmarking", "cumulus/pallets/solo-to-para", + 
"cumulus/pallets/subscriber", "cumulus/pallets/weight-reclaim", "cumulus/pallets/xcm", "cumulus/pallets/xcmp-queue", @@ -748,6 +749,7 @@ cumulus-pallet-aura-ext = { path = "cumulus/pallets/aura-ext", default-features cumulus-pallet-dmp-queue = { default-features = false, path = "cumulus/pallets/dmp-queue" } cumulus-pallet-parachain-system = { path = "cumulus/pallets/parachain-system", default-features = false } cumulus-pallet-parachain-system-proc-macro = { path = "cumulus/pallets/parachain-system/proc-macro", default-features = false } +cumulus-pallet-subscriber = { path = "cumulus/pallets/subscriber", default-features = false } cumulus-pallet-session-benchmarking = { path = "cumulus/pallets/session-benchmarking", default-features = false } cumulus-pallet-solo-to-para = { path = "cumulus/pallets/solo-to-para", default-features = false } cumulus-pallet-weight-reclaim = { path = "cumulus/pallets/weight-reclaim", default-features = false } diff --git a/cumulus/pallets/aura-ext/src/test.rs b/cumulus/pallets/aura-ext/src/test.rs index 7c4c78ab2a5b0..c8ffe91eb9417 100644 --- a/cumulus/pallets/aura-ext/src/test.rs +++ b/cumulus/pallets/aura-ext/src/test.rs @@ -151,6 +151,7 @@ impl cumulus_pallet_parachain_system::Config for Test { type CheckAssociatedRelayNumber = AnyRelayNumber; type ConsensusHook = ExpectParentIncluded; type RelayParentOffset = ConstU32<0>; + type ChildTrieProcessor = (); } fn set_ancestors() { diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index 5ff4af131f565..d31a2b46be48f 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -85,6 +85,7 @@ use unincluded_segment::{ }; pub use consensus_hook::{ConsensusHook, ExpectParentIncluded}; +pub use relay_state_snapshot::ProcessChildTrieData; /// Register the `validate_block` function that is used by parachains to validate blocks on a /// validator. 
/// @@ -263,6 +264,12 @@ pub mod pallet { /// /// If set to 0, this config has no impact. type RelayParentOffset: Get; + + /// Processor for child trie data from relay chain state proofs. + /// + /// This allows parachains to process published data from other parachains + /// by reading it from child tries included in the relay chain state proof. + type ChildTrieProcessor: relay_state_snapshot::ProcessChildTrieData; } #[pallet::hooks] @@ -701,6 +708,8 @@ pub mod pallet { >::put(relevant_messaging_state.clone()); >::put(host_config); + total_weight.saturating_accrue(T::ChildTrieProcessor::process_child_trie_data(&relay_state_proof)); + ::on_validation_data(&vfp); if let Some(collator_peer_id) = collator_peer_id { diff --git a/cumulus/pallets/parachain-system/src/relay_state_snapshot.rs b/cumulus/pallets/parachain-system/src/relay_state_snapshot.rs index 7138d61edd277..f7c1808c1c212 100644 --- a/cumulus/pallets/parachain-system/src/relay_state_snapshot.rs +++ b/cumulus/pallets/parachain-system/src/relay_state_snapshot.rs @@ -21,11 +21,24 @@ use codec::{Decode, Encode}; use cumulus_primitives_core::{ relay_chain, AbridgedHostConfiguration, AbridgedHrmpChannel, ParaId, }; +use frame_support::weights::Weight; use scale_info::TypeInfo; use sp_runtime::traits::HashingFor; use sp_state_machine::{Backend, TrieBackend, TrieBackendBuilder}; use sp_trie::{HashDBT, MemoryDB, StorageProof, EMPTY_PREFIX}; +/// Process child trie data from verified relay chain state proofs. +pub trait ProcessChildTrieData { + /// Process child trie data from a verified relay state proof. + fn process_child_trie_data(verified_proof: &RelayChainStateProof) -> Weight; +} + +impl ProcessChildTrieData for () { + fn process_child_trie_data(_verified_proof: &RelayChainStateProof) -> Weight { + Weight::zero() + } +} + /// The capacity of the upward message queue of a parachain on the relay chain. 
// The field order should stay the same as the data can be found in the proof to ensure both are // have the same encoded representation. @@ -383,4 +396,20 @@ impl RelayChainStateProof { { read_optional_entry(&self.trie_backend, key).map_err(Error::ReadOptionalEntry) } + + /// Read a value from a child trie in the relay chain state proof. + /// + /// Returns `Ok(Some(value))` if the key exists in the child trie, + /// `Ok(None)` if the key doesn't exist, + /// or `Err` if there was a proof error. + pub fn read_child_storage( + &self, + child_info: &sp_core::storage::ChildInfo, + key: &[u8], + ) -> Result>, Error> { + use sp_state_machine::Backend; + self.trie_backend + .child_storage(child_info, key) + .map_err(|_| Error::ReadEntry(ReadEntryErr::Proof)) + } } diff --git a/cumulus/pallets/subscriber/Cargo.toml b/cumulus/pallets/subscriber/Cargo.toml new file mode 100644 index 0000000000000..cff3094d41e46 --- /dev/null +++ b/cumulus/pallets/subscriber/Cargo.toml @@ -0,0 +1,37 @@ +[package] +name = "cumulus-pallet-subscriber" +version = "0.1.0" +authors.workspace = true +edition.workspace = true +description = "Subscriber pallet for processing published data from relay chain state proofs" +license = "Apache-2.0" + +[lints] +workspace = true + +[dependencies] +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } + +# Substrate +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-core = { workspace = true } +sp-std = { workspace = true } + +# Cumulus +cumulus-pallet-parachain-system = { workspace = true } +cumulus-primitives-core = { workspace = true } + +[features] +default = ["std"] +std = [ + "codec/std", + "cumulus-pallet-parachain-system/std", + "cumulus-primitives-core/std", + "frame-support/std", + "frame-system/std", + "scale-info/std", + "sp-core/std", + "sp-std/std", +] diff --git a/cumulus/pallets/subscriber/src/lib.rs b/cumulus/pallets/subscriber/src/lib.rs new file mode 
100644 index 0000000000000..0e6ebe76ac7d8 --- /dev/null +++ b/cumulus/pallets/subscriber/src/lib.rs @@ -0,0 +1,213 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![cfg_attr(not(feature = "std"), no_std)] + +//! Process child trie data from relay chain state proofs via configurable handler. + +extern crate alloc; + +use alloc::{collections::btree_map::BTreeMap, vec::Vec}; +use codec::Decode; +use cumulus_pallet_parachain_system::relay_state_snapshot::RelayChainStateProof; +use cumulus_primitives_core::ParaId; +use frame_support::{ + defensive, + pallet_prelude::*, + storage::bounded_btree_map::BoundedBTreeMap, + traits::Get, +}; +use sp_std::vec; + +pub use pallet::*; + +pub use cumulus_pallet_parachain_system::relay_state_snapshot::ProcessChildTrieData; + +/// Define subscriptions and handle received data. +pub trait SubscriptionHandler { + /// List of subscriptions as (ParaId, keys) tuples. + fn subscriptions() -> Vec<(ParaId, Vec>)>; + + /// Called when subscribed data is updated. 
+ fn on_data_updated(publisher: ParaId, key: Vec, value: Vec); +} + +#[frame_support::pallet] +pub mod pallet { + use super::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config { + type SubscriptionHandler: SubscriptionHandler; + type WeightInfo: WeightInfo; + } + + /// Child trie roots from previous block for change detection. + #[pallet::storage] + pub type PreviousPublishedDataRoots = StorageValue< + _, + BoundedBTreeMap>, ConstU32<100>>, + ValueQuery, + >; + + impl Pallet { + /// Build relay proof requests from subscriptions. + /// + /// Returns a `RelayProofRequest` with child trie proof requests for subscribed data. + pub fn get_relay_proof_requests() -> cumulus_primitives_core::RelayProofRequest { + let storage_keys = T::SubscriptionHandler::subscriptions() + .into_iter() + .flat_map(|(para_id, data_keys)| { + let child_info = Self::derive_child_info(para_id); + let storage_key = child_info.storage_key().to_vec(); + data_keys.into_iter().map(move |key| { + cumulus_primitives_core::RelayStorageKey::Child { + info: storage_key.clone(), + key, + } + }) + }) + .collect(); + + cumulus_primitives_core::RelayProofRequest { keys: storage_keys } + } + + fn derive_child_info(publisher_para_id: ParaId) -> sp_core::storage::ChildInfo { + use codec::Encode; + sp_core::storage::ChildInfo::new_default(&(b"pubsub", publisher_para_id).encode()) + } + + fn collect_publisher_roots( + relay_state_proof: &RelayChainStateProof, + ) -> Vec<(ParaId, Vec)> { + let subscriptions = T::SubscriptionHandler::subscriptions(); + + subscriptions + .into_iter() + .filter_map(|(publisher_para_id, _keys)| { + let child_info = Self::derive_child_info(publisher_para_id); + let prefixed_key = child_info.prefixed_storage_key(); + + relay_state_proof + .read_optional_entry::<[u8; 32]>(&*prefixed_key) + .ok() + .flatten() + .map(|root_hash| (publisher_para_id, root_hash.to_vec())) + }) + .collect() + } + + fn process_published_data( + 
relay_state_proof: &RelayChainStateProof, + current_roots: &Vec<(ParaId, Vec)>, + ) -> Weight { + let previous_roots = >::get(); + + if current_roots.is_empty() && previous_roots.is_empty() { + return T::DbWeight::get().reads(1); + } + + let mut p = 0u32; + let mut k = 0u32; + let mut v = 0u32; + + let current_roots_map: BTreeMap> = + current_roots.iter().map(|(para_id, root)| (*para_id, root.clone())).collect(); + + let subscriptions = T::SubscriptionHandler::subscriptions(); + + for (publisher, subscription_keys) in subscriptions { + let should_update = match previous_roots.get(&publisher) { + Some(prev_root) => match current_roots_map.get(&publisher) { + Some(curr_root) if prev_root == curr_root => false, + _ => true, + }, + None => true, + }; + + if should_update && current_roots_map.contains_key(&publisher) { + let child_info = Self::derive_child_info(publisher); + + for key in subscription_keys.iter() { + match relay_state_proof.read_child_storage(&child_info, key) { + Ok(Some(encoded_value)) => { + match Vec::::decode(&mut &encoded_value[..]) { + Ok(value) => { + T::SubscriptionHandler::on_data_updated( + publisher, + key.clone(), + value.clone(), + ); + v = v.max(value.len() as u32); + k += 1; + }, + Err(_) => { + defensive!("Failed to decode published data value"); + }, + } + }, + Ok(None) => { + // Key not published yet - expected + }, + Err(_) => { + defensive!("Failed to read child storage from relay chain proof"); + }, + } + } + + p += 1; + } + } + + let bounded_roots: BoundedBTreeMap>, ConstU32<100>> = + current_roots_map + .into_iter() + .filter_map(|(para_id, root)| { + BoundedVec::try_from(root).ok().map(|bounded_root| (para_id, bounded_root)) + }) + .collect::>() + .try_into() + .unwrap_or_default(); + >::put(bounded_roots); + + T::WeightInfo::process_published_data(p, k, v) + } + } + + impl ProcessChildTrieData for Pallet { + fn process_child_trie_data(verified_proof: &RelayChainStateProof) -> Weight { + let current_roots = 
Self::collect_publisher_roots(verified_proof); + Self::process_published_data(verified_proof, &current_roots) + } + } +} + +pub trait WeightInfo { + fn process_published_data(p: u32, k: u32, v: u32) -> Weight; +} + +impl WeightInfo for () { + fn process_published_data(_p: u32, k: u32, v: u32) -> Weight { + Weight::from_parts(10_000_000, 0) + .saturating_add(Weight::from_parts(5_000 * k as u64, 0)) + .saturating_add(Weight::from_parts(100 * v as u64, 0)) + .saturating_add(frame_support::weights::constants::RocksDbWeight::get().reads(1 + k as u64)) + .saturating_add(frame_support::weights::constants::RocksDbWeight::get().writes(1)) + } +} diff --git a/cumulus/pallets/xcmp-queue/src/mock.rs b/cumulus/pallets/xcmp-queue/src/mock.rs index 3be87221c052e..dc20f23098b28 100644 --- a/cumulus/pallets/xcmp-queue/src/mock.rs +++ b/cumulus/pallets/xcmp-queue/src/mock.rs @@ -106,6 +106,7 @@ impl cumulus_pallet_parachain_system::Config for Test { type CheckAssociatedRelayNumber = AnyRelayNumber; type ConsensusHook = cumulus_pallet_parachain_system::consensus_hook::ExpectParentIncluded; type RelayParentOffset = ConstU32<0>; + type ChildTrieProcessor = (); } parameter_types!
{ diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 69c7a9e544326..c8b8ac2d3327f 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -745,6 +745,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32<0>; + type ChildTrieProcessor = (); } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index 8b9cbf66015c7..73e8f0a2bcd87 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -903,6 +903,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32<0>; + type ChildTrieProcessor = (); } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index cfc58a2a4f6eb..4559bfc9d8577 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -400,6 +400,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32<0>; + type ChildTrieProcessor = (); } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< diff --git 
a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 62532fac5fec3..b8bdf256cb5d1 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -392,6 +392,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32<0>; + type ChildTrieProcessor = (); } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index ba70aabd9d0cf..6f6c1d59a114d 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -423,6 +423,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32<0>; + type ChildTrieProcessor = (); } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index ed8748a34933a..eadb74b61bfde 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -305,6 +305,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32<0>; + type ChildTrieProcessor = (); } type ConsensusHook = 
cumulus_pallet_aura_ext::FixedVelocityConsensusHook< diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs index eadc4d289fe9d..b2b91a856704a 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs @@ -191,6 +191,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type ConsensusHook = ConsensusHook; type WeightInfo = weights::cumulus_pallet_parachain_system::WeightInfo; type RelayParentOffset = ConstU32<0>; + type ChildTrieProcessor = (); } parameter_types! { diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index dc4a616f02d1c..48b98a77a3b3d 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -281,6 +281,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type ConsensusHook = ConsensusHook; type WeightInfo = weights::cumulus_pallet_parachain_system::WeightInfo; type RelayParentOffset = ConstU32<0>; + type ChildTrieProcessor = (); } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs index bc018b160f778..20a5c8a5909db 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs @@ -664,6 +664,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { >; type RelayParentOffset = ConstU32<0>; + type ChildTrieProcessor = (); } impl parachain_info::Config for Runtime {} diff --git a/cumulus/parachains/runtimes/testing/yet-another-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/yet-another-parachain/src/lib.rs index 0f5d2ecd99494..cab8dbe1c326c 100644 --- 
a/cumulus/parachains/runtimes/testing/yet-another-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/yet-another-parachain/src/lib.rs @@ -368,6 +368,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32; + type ChildTrieProcessor = (); } impl pallet_message_queue::Config for Runtime { diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index e191569a23127..40b49a7feeb0e 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -385,6 +385,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32; + type ChildTrieProcessor = (); } impl parachain_info::Config for Runtime {} From 8e19e210b76b2d02c48576a50075c73b042aa6f6 Mon Sep 17 00:00:00 2001 From: metricaez Date: Thu, 11 Dec 2025 16:17:48 -0300 Subject: [PATCH 22/69] feat: pubsubConsumer and rococo parachain integration --- Cargo.lock | 393 ++++++++++++++++--- Cargo.toml | 2 + cumulus/pallets/parachain-system/src/mock.rs | 1 + cumulus/pallets/pubsub-consumer/Cargo.toml | 35 ++ cumulus/pallets/pubsub-consumer/src/lib.rs | 90 +++++ 5 files changed, 469 insertions(+), 52 deletions(-) create mode 100644 cumulus/pallets/pubsub-consumer/Cargo.toml create mode 100644 cumulus/pallets/pubsub-consumer/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 1e81b8f54956e..2c2a54410c0c0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1193,6 +1193,36 @@ dependencies = [ "testnet-parachains-constants", ] +[[package]] +name = "asset-hub-rococo-integration-tests" +version = "1.0.0" +dependencies = [ + "assert_matches", + "asset-test-utils", + "cumulus-pallet-parachain-system", + "emulated-integration-tests-common", + "frame-support", + "frame-system", + "pallet-asset-conversion", + 
"pallet-asset-rewards", + "pallet-assets", + "pallet-balances", + "pallet-message-queue", + "pallet-treasury", + "pallet-utility", + "pallet-xcm", + "parachains-common", + "parity-scale-codec", + "polkadot-runtime-common", + "rococo-runtime-constants", + "rococo-system-emulated-network", + "sp-core 28.0.0", + "sp-runtime", + "staging-xcm", + "staging-xcm-executor", + "xcm-runtime-apis", +] + [[package]] name = "asset-hub-rococo-runtime" version = "0.11.0" @@ -1379,7 +1409,6 @@ dependencies = [ "pallet-indices", "pallet-message-queue", "pallet-migrations", - "pallet-multi-asset-bounties", "pallet-multisig", "pallet-nft-fractionalization", "pallet-nfts", @@ -2561,6 +2590,7 @@ dependencies = [ "pallet-xcm", "parachains-common", "parity-scale-codec", + "rococo-system-emulated-network", "rococo-westend-system-emulated-network", "scale-info", "snowbridge-inbound-queue-primitives", @@ -3734,6 +3764,104 @@ dependencies = [ "memchr", ] +[[package]] +name = "coretime-rococo-emulated-chain" +version = "0.1.0" +dependencies = [ + "coretime-rococo-runtime", + "cumulus-primitives-core", + "emulated-integration-tests-common", + "frame-support", + "parachains-common", + "sp-core 28.0.0", + "testnet-parachains-constants", +] + +[[package]] +name = "coretime-rococo-integration-tests" +version = "0.0.0" +dependencies = [ + "cumulus-pallet-parachain-system", + "emulated-integration-tests-common", + "frame-support", + "pallet-broker", + "pallet-message-queue", + "polkadot-runtime-parachains", + "rococo-runtime-constants", + "rococo-system-emulated-network", + "sp-runtime", + "staging-xcm", +] + +[[package]] +name = "coretime-rococo-runtime" +version = "0.1.0" +dependencies = [ + "cumulus-pallet-aura-ext", + "cumulus-pallet-parachain-system", + "cumulus-pallet-session-benchmarking", + "cumulus-pallet-weight-reclaim", + "cumulus-pallet-xcm", + "cumulus-pallet-xcmp-queue", + "cumulus-primitives-aura", + "cumulus-primitives-core", + "cumulus-primitives-utility", + "frame-benchmarking", + 
"frame-executive", + "frame-metadata-hash-extension", + "frame-support", + "frame-system", + "frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "frame-try-runtime", + "pallet-aura", + "pallet-authorship", + "pallet-balances", + "pallet-broker", + "pallet-collator-selection", + "pallet-message-queue", + "pallet-multisig", + "pallet-proxy", + "pallet-session", + "pallet-sudo", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "pallet-utility", + "pallet-xcm", + "pallet-xcm-benchmarks", + "parachains-common", + "parachains-runtimes-test-utils", + "parity-scale-codec", + "polkadot-parachain-primitives", + "polkadot-runtime-common", + "rococo-runtime-constants", + "scale-info", + "serde", + "serde_json", + "sp-api", + "sp-block-builder", + "sp-consensus-aura", + "sp-core 28.0.0", + "sp-genesis-builder", + "sp-inherents", + "sp-keyring", + "sp-offchain", + "sp-runtime", + "sp-session", + "sp-storage 19.0.0", + "sp-transaction-pool", + "sp-version", + "staging-parachain-info", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", + "substrate-wasm-builder", + "testnet-parachains-constants", + "tracing", + "xcm-runtime-apis", +] + [[package]] name = "coretime-westend-emulated-chain" version = "0.1.0" @@ -4614,6 +4742,19 @@ dependencies = [ "syn 2.0.98", ] +[[package]] +name = "cumulus-pallet-pubsub-consumer" +version = "0.1.0" +dependencies = [ + "cumulus-pallet-subscriber", + "cumulus-primitives-core", + "frame-support", + "frame-system", + "parity-scale-codec", + "scale-info", + "sp-std 14.0.0", +] + [[package]] name = "cumulus-pallet-session-benchmarking" version = "9.0.0" @@ -5015,9 +5156,6 @@ dependencies = [ "cumulus-primitives-core", "parity-scale-codec", "polkadot-primitives", - "proptest", - "sp-consensus-babe", - "sp-core 28.0.0", "sp-runtime", "sp-state-machine", "sp-trie", @@ -6677,7 +6815,7 @@ dependencies = [ "sp-version", "sp-wasm-interface 20.0.0", "substrate-test-runtime", - 
"subxt 0.43.1", + "subxt 0.43.0", "subxt-signer 0.43.0", "thiserror 1.0.65", "thousands", @@ -6717,7 +6855,7 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e56c0e51972d7b26ff76966c4d0f2307030df9daa5ce0885149ece1ab7ca5ad" dependencies = [ - "frame-metadata 23.0.1", + "frame-metadata 23.0.0", "parity-scale-codec", "scale-decode", "scale-info", @@ -6809,9 +6947,9 @@ dependencies = [ [[package]] name = "frame-metadata" -version = "23.0.1" +version = "23.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ba5be0edbdb824843a0f9c6f0906ecfc66c5316218d74457003218b24909ed0" +checksum = "d8c26fcb0454397c522c05fdad5380c4e622f8a875638af33bff5a320d1fc965" dependencies = [ "cfg-if", "parity-scale-codec", @@ -6826,7 +6964,7 @@ dependencies = [ "array-bytes 6.2.2", "const-hex", "docify", - "frame-metadata 23.0.1", + "frame-metadata 23.0.0", "frame-support", "frame-system", "log", @@ -6905,7 +7043,7 @@ dependencies = [ "bitflags 1.3.2", "docify", "environmental", - "frame-metadata 23.0.1", + "frame-metadata 23.0.0", "frame-support-procedural", "frame-system", "impl-trait-for-tuples", @@ -6994,7 +7132,7 @@ version = "3.0.0" dependencies = [ "frame-benchmarking", "frame-executive", - "frame-metadata 23.0.1", + "frame-metadata 23.0.0", "frame-support", "frame-support-test-pallet", "frame-system", @@ -10210,7 +10348,7 @@ checksum = "b3e3e3f549d27d2dc054372f320ddf68045a833fab490563ff70d4cf1b9d91ea" dependencies = [ "array-bytes 9.1.2", "blake3", - "frame-metadata 23.0.1", + "frame-metadata 23.0.0", "parity-scale-codec", "scale-decode", "scale-info", @@ -12439,7 +12577,7 @@ dependencies = [ name = "pallet-example-view-functions" version = "1.0.0" dependencies = [ - "frame-metadata 23.0.1", + "frame-metadata 23.0.0", "frame-support", "frame-system", "log", @@ -13257,7 +13395,7 @@ dependencies = [ "sp-weights", "sqlx", "substrate-prometheus-endpoint", - "subxt 0.43.1", + 
"subxt 0.43.0", "subxt-signer 0.43.0", "thiserror 1.0.65", "tokio", @@ -14311,10 +14449,8 @@ dependencies = [ "pallet-balances", "pallet-collator-selection", "pallet-message-queue", - "pallet-multi-asset-bounties", "pallet-treasury", "pallet-xcm", - "parachains-common-types", "parity-scale-codec", "polkadot-primitives", "polkadot-runtime-common", @@ -14329,15 +14465,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "parachains-common-types" -version = "0.1.0" -dependencies = [ - "sp-consensus-aura", - "sp-core 28.0.0", - "sp-runtime", -] - [[package]] name = "parachains-relay" version = "0.1.0" @@ -14647,6 +14774,103 @@ dependencies = [ "xcm-runtime-apis", ] +[[package]] +name = "people-rococo-emulated-chain" +version = "0.1.0" +dependencies = [ + "cumulus-primitives-core", + "emulated-integration-tests-common", + "frame-support", + "parachains-common", + "people-rococo-runtime", + "sp-core 28.0.0", + "testnet-parachains-constants", +] + +[[package]] +name = "people-rococo-integration-tests" +version = "0.1.0" +dependencies = [ + "asset-test-utils", + "emulated-integration-tests-common", + "frame-support", + "pallet-balances", + "parachains-common", + "rococo-system-emulated-network", + "sp-runtime", + "staging-xcm", + "staging-xcm-executor", +] + +[[package]] +name = "people-rococo-runtime" +version = "0.1.0" +dependencies = [ + "cumulus-pallet-aura-ext", + "cumulus-pallet-parachain-system", + "cumulus-pallet-session-benchmarking", + "cumulus-pallet-weight-reclaim", + "cumulus-pallet-xcm", + "cumulus-pallet-xcmp-queue", + "cumulus-primitives-aura", + "cumulus-primitives-core", + "cumulus-primitives-utility", + "enumflags2", + "frame-benchmarking", + "frame-executive", + "frame-support", + "frame-system", + "frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "frame-try-runtime", + "pallet-aura", + "pallet-authorship", + "pallet-balances", + "pallet-collator-selection", + "pallet-identity", + "pallet-message-queue", + "pallet-migrations", + 
"pallet-multisig", + "pallet-proxy", + "pallet-session", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "pallet-utility", + "pallet-xcm", + "pallet-xcm-benchmarks", + "parachains-common", + "parachains-runtimes-test-utils", + "parity-scale-codec", + "polkadot-parachain-primitives", + "polkadot-runtime-common", + "rococo-runtime-constants", + "scale-info", + "serde", + "serde_json", + "sp-api", + "sp-block-builder", + "sp-consensus-aura", + "sp-core 28.0.0", + "sp-genesis-builder", + "sp-inherents", + "sp-keyring", + "sp-offchain", + "sp-runtime", + "sp-session", + "sp-storage 19.0.0", + "sp-transaction-pool", + "sp-version", + "staging-parachain-info", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", + "substrate-wasm-builder", + "testnet-parachains-constants", + "tracing", + "xcm-runtime-apis", +] + [[package]] name = "people-westend-emulated-chain" version = "0.1.0" @@ -15933,7 +16157,7 @@ dependencies = [ "pallet-transaction-payment", "pallet-transaction-payment-rpc", "pallet-transaction-payment-rpc-runtime-api", - "parachains-common-types", + "parachains-common", "parity-scale-codec", "polkadot-cli", "polkadot-primitives", @@ -16026,6 +16250,7 @@ dependencies = [ "bridge-hub-westend-runtime", "collectives-westend-runtime", "color-eyre", + "coretime-rococo-runtime", "coretime-westend-runtime", "cumulus-client-consensus-aura", "cumulus-primitives-core", @@ -16034,8 +16259,10 @@ dependencies = [ "log", "parachains-common", "penpal-runtime", + "people-rococo-runtime", "people-westend-runtime", "polkadot-omni-node-lib", + "rococo-parachain-runtime", "sc-chain-spec", "sc-cli", "sc-service", @@ -16466,7 +16693,6 @@ dependencies = [ "pallet-xcm-bridge-hub-router", "pallet-xcm-precompiles", "parachains-common", - "parachains-common-types", "parachains-runtimes-test-utils", "polkadot-approval-distribution", "polkadot-availability-bitfield-distribution", @@ -17216,7 +17442,7 @@ dependencies = [ 
"sp-core 28.0.0", "sp-io", "substrate-build-script-utils", - "subxt 0.43.1", + "subxt 0.43.0", "tokio", "tokio-util", "zombienet-orchestrator", @@ -18863,6 +19089,60 @@ dependencies = [ "sp-keyring", ] +[[package]] +name = "rococo-parachain-runtime" +version = "0.6.0" +dependencies = [ + "cumulus-pallet-aura-ext", + "cumulus-pallet-parachain-system", + "cumulus-pallet-pubsub-consumer", + "cumulus-pallet-subscriber", + "cumulus-pallet-weight-reclaim", + "cumulus-pallet-xcm", + "cumulus-pallet-xcmp-queue", + "cumulus-ping", + "cumulus-primitives-aura", + "cumulus-primitives-core", + "cumulus-primitives-utility", + "frame-benchmarking", + "frame-executive", + "frame-support", + "frame-system", + "frame-system-rpc-runtime-api", + "pallet-assets", + "pallet-aura", + "pallet-balances", + "pallet-message-queue", + "pallet-sudo", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "pallet-xcm", + "parachains-common", + "parity-scale-codec", + "polkadot-parachain-primitives", + "polkadot-runtime-common", + "scale-info", + "serde_json", + "sp-api", + "sp-block-builder", + "sp-consensus-aura", + "sp-core 28.0.0", + "sp-genesis-builder", + "sp-inherents", + "sp-keyring", + "sp-offchain", + "sp-runtime", + "sp-session", + "sp-transaction-pool", + "sp-version", + "staging-parachain-info", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", + "substrate-wasm-builder", +] + [[package]] name = "rococo-runtime" version = "7.0.0" @@ -18978,6 +19258,19 @@ dependencies = [ "staging-xcm-builder", ] +[[package]] +name = "rococo-system-emulated-network" +version = "0.0.0" +dependencies = [ + "asset-hub-rococo-emulated-chain", + "bridge-hub-rococo-emulated-chain", + "coretime-rococo-emulated-chain", + "emulated-integration-tests-common", + "penpal-emulated-chain", + "people-rococo-emulated-chain", + "rococo-emulated-chain", +] + [[package]] name = "rococo-westend-system-emulated-network" version = "0.0.0" @@ -20338,6 
+20631,7 @@ dependencies = [ "sp-arithmetic", "sp-blockchain", "sp-consensus", + "sp-consensus-grandpa", "sp-core 28.0.0", "sp-runtime", "sp-test-primitives", @@ -20622,7 +20916,7 @@ dependencies = [ "sp-state-machine", "sp-version", "sp-wasm-interface 20.0.0", - "subxt 0.43.1", + "subxt 0.43.0", "thiserror 1.0.65", ] @@ -20898,7 +21192,6 @@ dependencies = [ "sp-runtime", "sp-tracing 16.0.0", "sp-transaction-pool", - "strum 0.26.3", "substrate-prometheus-endpoint", "substrate-test-runtime", "substrate-test-runtime-client", @@ -20927,7 +21220,6 @@ dependencies = [ "sp-blockchain", "sp-core 28.0.0", "sp-runtime", - "strum 0.26.3", "thiserror 1.0.65", ] @@ -22056,7 +22348,6 @@ dependencies = [ "frame-support", "frame-system", "hex-literal", - "impl-trait-for-tuples", "parity-scale-codec", "scale-info", "snowbridge-beacon-primitives", @@ -23074,7 +23365,6 @@ dependencies = [ name = "sp-debug-derive" version = "14.0.0" dependencies = [ - "proc-macro-warning", "proc-macro2 1.0.95", "quote 1.0.40", "syn 2.0.98", @@ -23191,7 +23481,7 @@ dependencies = [ name = "sp-metadata-ir" version = "0.6.0" dependencies = [ - "frame-metadata 23.0.1", + "frame-metadata 23.0.0", "parity-scale-codec", "scale-info", ] @@ -23304,7 +23594,6 @@ dependencies = [ "sp-tracing 16.0.0", "sp-trie", "sp-weights", - "strum 0.26.3", "substrate-test-runtime-client", "tracing", "tuplex", @@ -24588,7 +24877,7 @@ dependencies = [ "cargo_metadata", "console", "filetime", - "frame-metadata 23.0.1", + "frame-metadata 23.0.0", "jobserver", "merkleized-metadata", "parity-scale-codec", @@ -24615,7 +24904,7 @@ dependencies = [ "anyhow", "env_logger 0.11.3", "log", - "subxt 0.43.1", + "subxt 0.43.0", "subxt-signer 0.43.0", "tokio", "zombienet-configuration", @@ -24680,14 +24969,14 @@ dependencies = [ [[package]] name = "subxt" -version = "0.43.1" +version = "0.43.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f8c6dc0f90e23c521465b8f7e026af04a48cc6f00c51d88a8d313d33096149de" +checksum = "74791ddeaaa6de42e7cc8a715c83eb73303f513f90af701fd07eb2caad92ed84" dependencies = [ "async-trait", "derive-where", "either", - "frame-metadata 23.0.1", + "frame-metadata 23.0.0", "futures", "hex", "jsonrpsee", @@ -24703,7 +24992,7 @@ dependencies = [ "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "subxt-core 0.43.0", "subxt-lightclient 0.43.0", - "subxt-macro 0.43.1", + "subxt-macro 0.43.0", "subxt-metadata 0.43.0", "subxt-rpcs 0.43.0", "thiserror 2.0.12", @@ -24789,7 +25078,7 @@ dependencies = [ "blake2 0.10.6", "derive-where", "frame-decode 0.8.3", - "frame-metadata 23.0.1", + "frame-metadata 23.0.0", "hashbrown 0.14.5", "hex", "impl-serde", @@ -24861,9 +25150,9 @@ dependencies = [ [[package]] name = "subxt-macro" -version = "0.43.1" +version = "0.43.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c269228a2e5de4c0c61ed872b701967ee761df0f167d5b91ecec1185bca65793" +checksum = "69516e8ff0e9340a0f21b8398da7f997571af4734ee81deada5150a2668c8443" dependencies = [ "darling 0.20.10", "parity-scale-codec", @@ -24898,7 +25187,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c134068711c0c46906abc0e6e4911204420331530738e18ca903a5469364d9f" dependencies = [ "frame-decode 0.8.3", - "frame-metadata 23.0.1", + "frame-metadata 23.0.0", "hashbrown 0.14.5", "parity-scale-codec", "scale-info", @@ -24938,7 +25227,7 @@ checksum = "25de7727144780d780a6a7d78bbfd28414b8adbab68b05e87329c367d7705be4" dependencies = [ "derive-where", "finito", - "frame-metadata 23.0.1", + "frame-metadata 23.0.0", "futures", "hex", "impl-serde", @@ -26086,9 +26375,9 @@ dependencies = [ [[package]] name = "trie-bench" -version = "0.42.1" +version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a03bee4700c5dd6b2ceba5e4e4d5a7017704a761481824d3033d223f9660973a" +checksum = "972be214c558b1a5550d34c8c7e55a284f6439cefc51226d6ffbfc152de5cc58" dependencies = [ "criterion", "hash-db", @@ -26102,9 +26391,9 @@ dependencies = [ [[package]] name = "trie-db" -version = "0.31.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7795f2df2ef744e4ffb2125f09325e60a21d305cc3ecece0adeef03f7a9e560" +checksum = "6c0670ab45a6b7002c7df369fee950a27cf29ae0474343fd3a15aa15f691e7a6" dependencies = [ "hash-db", "log", @@ -28440,7 +28729,7 @@ dependencies = [ "serde_json", "sha2 0.10.9", "sp-core 36.1.0", - "subxt 0.43.1", + "subxt 0.43.0", "subxt-signer 0.43.0", "thiserror 1.0.65", "tokio", @@ -28503,7 +28792,7 @@ dependencies = [ "async-trait", "futures", "lazy_static", - "subxt 0.43.1", + "subxt 0.43.0", "subxt-signer 0.43.0", "tokio", "zombienet-configuration", diff --git a/Cargo.toml b/Cargo.toml index eb8af69d9af4d..b8c3bb04f9bc2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -83,6 +83,7 @@ members = [ "cumulus/pallets/dmp-queue", "cumulus/pallets/parachain-system", "cumulus/pallets/parachain-system/proc-macro", + "cumulus/pallets/pubsub-consumer", "cumulus/pallets/session-benchmarking", "cumulus/pallets/solo-to-para", "cumulus/pallets/subscriber", @@ -749,6 +750,7 @@ cumulus-pallet-aura-ext = { path = "cumulus/pallets/aura-ext", default-features cumulus-pallet-dmp-queue = { default-features = false, path = "cumulus/pallets/dmp-queue" } cumulus-pallet-parachain-system = { path = "cumulus/pallets/parachain-system", default-features = false } cumulus-pallet-parachain-system-proc-macro = { path = "cumulus/pallets/parachain-system/proc-macro", default-features = false } +cumulus-pallet-pubsub-consumer = { path = "cumulus/pallets/pubsub-consumer", default-features = false } cumulus-pallet-subscriber = { path = "cumulus/pallets/subscriber", default-features = false } cumulus-pallet-session-benchmarking = { path = 
"cumulus/pallets/session-benchmarking", default-features = false } cumulus-pallet-solo-to-para = { path = "cumulus/pallets/solo-to-para", default-features = false } diff --git a/cumulus/pallets/parachain-system/src/mock.rs b/cumulus/pallets/parachain-system/src/mock.rs index d3c7cef52b637..720de35b9766c 100644 --- a/cumulus/pallets/parachain-system/src/mock.rs +++ b/cumulus/pallets/parachain-system/src/mock.rs @@ -99,6 +99,7 @@ impl Config for Test { type ConsensusHook = TestConsensusHook; type WeightInfo = (); type RelayParentOffset = ConstU32<0>; + type ChildTrieProcessor = (); } std::thread_local! { diff --git a/cumulus/pallets/pubsub-consumer/Cargo.toml b/cumulus/pallets/pubsub-consumer/Cargo.toml new file mode 100644 index 0000000000000..88a01958ef1df --- /dev/null +++ b/cumulus/pallets/pubsub-consumer/Cargo.toml @@ -0,0 +1,35 @@ +[package] +name = "cumulus-pallet-pubsub-consumer" +version = "0.1.0" +authors.workspace = true +edition.workspace = true +description = "Example consumer pallet for testing pubsub subscriptions" +license = "Apache-2.0" + +[lints] +workspace = true + +[dependencies] +codec = { features = ["derive"], workspace = true } +scale-info = { features = ["derive"], workspace = true } + +# Substrate +frame-support = { workspace = true } +frame-system = { workspace = true } +sp-std = { workspace = true } + +# Cumulus +cumulus-pallet-subscriber = { workspace = true } +cumulus-primitives-core = { workspace = true } + +[features] +default = ["std"] +std = [ + "codec/std", + "cumulus-pallet-subscriber/std", + "cumulus-primitives-core/std", + "frame-support/std", + "frame-system/std", + "scale-info/std", + "sp-std/std", +] diff --git a/cumulus/pallets/pubsub-consumer/src/lib.rs b/cumulus/pallets/pubsub-consumer/src/lib.rs new file mode 100644 index 0000000000000..8d917119c1131 --- /dev/null +++ b/cumulus/pallets/pubsub-consumer/src/lib.rs @@ -0,0 +1,90 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![cfg_attr(not(feature = "std"), no_std)] + +//! Test consumer for pubsub subscriptions. + +extern crate alloc; + +use alloc::vec::Vec; +use cumulus_primitives_core::ParaId; +use frame_support::{pallet_prelude::*, BoundedVec}; + +pub use pallet::*; + +pub struct TestSubscriptionHandler(core::marker::PhantomData); + +impl cumulus_pallet_subscriber::SubscriptionHandler for TestSubscriptionHandler { + fn subscriptions() -> Vec<(ParaId, Vec>)> { + alloc::vec![(ParaId::from(1000), alloc::vec![alloc::vec![0x12, 0x34]])] + } + + fn on_data_updated(publisher: ParaId, key: Vec, value: Vec) { + let bounded_key: BoundedVec> = + key.clone().try_into().unwrap_or_default(); + let bounded_value: BoundedVec> = + value.clone().try_into().unwrap_or_default(); + + >::insert(&publisher, &bounded_key, &bounded_value); + + Pallet::::deposit_event(Event::DataReceived { + publisher, + key: bounded_key, + value: bounded_value, + }); + } +} + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config>> {} + + #[pallet::storage] + pub type ReceivedData = StorageDoubleMap< + _, + Blake2_128Concat, + ParaId, + Blake2_128Concat, + BoundedVec>, + BoundedVec>, + OptionQuery, + >; + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn 
deposit_event)] + pub enum Event { + DataReceived { + publisher: ParaId, + key: BoundedVec>, + value: BoundedVec>, + }, + } + + impl Pallet { + pub fn get_data(publisher: ParaId, key: &[u8]) -> Option> { + let bounded_key: BoundedVec> = key.to_vec().try_into().ok()?; + ReceivedData::::get(publisher, bounded_key).map(|v| v.into_inner()) + } + } +} From d60113112937a615c44ded4da67e626acbc843f5 Mon Sep 17 00:00:00 2001 From: metricaez Date: Thu, 11 Dec 2025 21:31:01 -0300 Subject: [PATCH 23/69] feat: basic subscriber events, errors and cleanup --- cumulus/pallets/subscriber/src/lib.rs | 61 ++++++++++++++++++++++++++- 1 file changed, 60 insertions(+), 1 deletion(-) diff --git a/cumulus/pallets/subscriber/src/lib.rs b/cumulus/pallets/subscriber/src/lib.rs index 0e6ebe76ac7d8..e6e7518079e98 100644 --- a/cumulus/pallets/subscriber/src/lib.rs +++ b/cumulus/pallets/subscriber/src/lib.rs @@ -48,6 +48,7 @@ pub trait SubscriptionHandler { #[frame_support::pallet] pub mod pallet { use super::*; + use frame_system::pallet_prelude::*; #[pallet::pallet] pub struct Pallet(_); @@ -66,6 +67,49 @@ pub mod pallet { ValueQuery, >; + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// Data was received and processed from a publisher. + DataProcessed { + publisher: ParaId, + key: BoundedVec>, + value_size: u32, + }, + /// Stored publisher roots were cleared. + RootsCleared { count: u32 }, + } + + #[pallet::error] + pub enum Error { + /// No stored roots to clear. + NoStoredRoots, + } + + #[pallet::call] + impl Pallet { + /// Clear all stored publisher roots. + /// + /// This forces reprocessing of all subscribed data in the next block. + /// Useful for recovery scenarios or manual cache invalidation. + /// + /// - `origin`: Must be root. 
+ #[pallet::call_index(0)] + #[pallet::weight(T::WeightInfo::clear_stored_roots())] + pub fn clear_stored_roots(origin: OriginFor) -> DispatchResult { + ensure_root(origin)?; + + let roots = >::get(); + ensure!(!roots.is_empty(), Error::::NoStoredRoots); + + let count = roots.len() as u32; + >::kill(); + + Self::deposit_event(Event::RootsCleared { count }); + Ok(()) + } + } + impl Pallet { /// Build relay proof requests from subscriptions. /// @@ -149,12 +193,22 @@ pub mod pallet { Ok(Some(encoded_value)) => { match Vec::::decode(&mut &encoded_value[..]) { Ok(value) => { + let value_size = value.len() as u32; T::SubscriptionHandler::on_data_updated( publisher, key.clone(), value.clone(), ); - v = v.max(value.len() as u32); + + if let Ok(bounded_key) = BoundedVec::try_from(key.clone()) { + Self::deposit_event(Event::DataProcessed { + publisher, + key: bounded_key, + value_size, + }); + } + + v = v.max(value_size); k += 1; }, Err(_) => { @@ -200,6 +254,7 @@ pub mod pallet { pub trait WeightInfo { fn process_published_data(p: u32, k: u32, v: u32) -> Weight; + fn clear_stored_roots() -> Weight; } impl WeightInfo for () { @@ -210,4 +265,8 @@ impl WeightInfo for () { .saturating_add(frame_support::weights::constants::RocksDbWeight::get().reads(1 + k as u64)) .saturating_add(frame_support::weights::constants::RocksDbWeight::get().writes(1)) } + + fn clear_stored_roots() -> Weight { + frame_support::weights::constants::RocksDbWeight::get().reads_writes(1, 1) + } } From 776ca970575442b210c25a8907ec0ecdb374297f Mon Sep 17 00:00:00 2001 From: metricaez Date: Thu, 11 Dec 2025 23:54:56 -0300 Subject: [PATCH 24/69] feat: Subscriber tests --- Cargo.lock | 5 + cumulus/pallets/subscriber/Cargo.toml | 7 ++ cumulus/pallets/subscriber/src/lib.rs | 4 + cumulus/pallets/subscriber/src/mock.rs | 97 ++++++++++++++++ cumulus/pallets/subscriber/src/tests.rs | 143 ++++++++++++++++++++++++ 5 files changed, 256 insertions(+) create mode 100644 cumulus/pallets/subscriber/src/mock.rs create mode 
100644 cumulus/pallets/subscriber/src/tests.rs diff --git a/Cargo.lock b/Cargo.lock index 2c2a54410c0c0..c3f17722d58ba 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4790,9 +4790,14 @@ dependencies = [ "frame-support", "frame-system", "parity-scale-codec", + "polkadot-primitives", "scale-info", "sp-core 28.0.0", + "sp-io", + "sp-runtime", + "sp-state-machine", "sp-std 14.0.0", + "sp-trie", ] [[package]] diff --git a/cumulus/pallets/subscriber/Cargo.toml b/cumulus/pallets/subscriber/Cargo.toml index cff3094d41e46..4eecadef89fd6 100644 --- a/cumulus/pallets/subscriber/Cargo.toml +++ b/cumulus/pallets/subscriber/Cargo.toml @@ -23,6 +23,13 @@ sp-std = { workspace = true } cumulus-pallet-parachain-system = { workspace = true } cumulus-primitives-core = { workspace = true } +[dev-dependencies] +polkadot-primitives = { workspace = true, default-features = true } +sp-io = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } + [features] default = ["std"] std = [ diff --git a/cumulus/pallets/subscriber/src/lib.rs b/cumulus/pallets/subscriber/src/lib.rs index e6e7518079e98..d1ebfddca3b17 100644 --- a/cumulus/pallets/subscriber/src/lib.rs +++ b/cumulus/pallets/subscriber/src/lib.rs @@ -36,6 +36,10 @@ pub use pallet::*; pub use cumulus_pallet_parachain_system::relay_state_snapshot::ProcessChildTrieData; +mod mock; +#[cfg(test)] +mod tests; + /// Define subscriptions and handle received data. pub trait SubscriptionHandler { /// List of subscriptions as (ParaId, keys) tuples. diff --git a/cumulus/pallets/subscriber/src/mock.rs b/cumulus/pallets/subscriber/src/mock.rs new file mode 100644 index 0000000000000..86a26737bef00 --- /dev/null +++ b/cumulus/pallets/subscriber/src/mock.rs @@ -0,0 +1,97 @@ +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +#![cfg(test)] + +use super::*; +use codec::Encode; +use cumulus_pallet_parachain_system::RelayChainStateProof; +use cumulus_primitives_core::ParaId; +use frame_support::{derive_impl, parameter_types}; +use sp_runtime::{BuildStorage, StateVersion}; +use sp_state_machine::{Backend, TrieBackendBuilder}; +use sp_trie::{PrefixedMemoryDB, StorageProof}; + +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test { + System: frame_system, + Subscriber: crate, + } +); + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] +impl frame_system::Config for Test { + type Block = Block; +} + +// Test handler that records calls +parameter_types! { + pub static ReceivedData: Vec<(ParaId, Vec, Vec)> = vec![]; + pub static TestSubscriptions: Vec<(ParaId, Vec>)> = vec![]; +} + +pub struct TestHandler; +impl SubscriptionHandler for TestHandler { + fn subscriptions() -> Vec<(ParaId, Vec>)> { + TestSubscriptions::get() + } + + fn on_data_updated(publisher: ParaId, key: Vec, value: Vec) { + ReceivedData::mutate(|d| d.push((publisher, key, value))); + } +} + +impl crate::Config for Test { + type SubscriptionHandler = TestHandler; + type WeightInfo = (); +} + +pub fn new_test_ext() -> sp_io::TestExternalities { + let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + t.into() +} + +/// Minimal relay chain state proof builder for subscriber tests +pub fn build_sproof_with_child_data( + publisher_para_id: ParaId, + child_data: Vec<(Vec, Vec)>, +) -> RelayChainStateProof { + use sp_runtime::traits::HashingFor; + + let (db, root) = PrefixedMemoryDB::>::default_with_root(); + let state_version = StateVersion::default(); + let mut backend = TrieBackendBuilder::new(db, root).build(); + + // Derive child info same way as pallet + let child_info = sp_core::storage::ChildInfo::new_default(&(b"pubsub", publisher_para_id).encode()); + + // Insert child trie data + let child_kv: 
Vec<_> = child_data.iter().map(|(k, v)| (k.clone(), Some(v.clone()))).collect(); + backend.insert(vec![(Some(child_info.clone()), child_kv)], state_version); + + // Get child trie root and insert it in main trie + let child_root = backend.child_storage_root(&child_info, core::iter::empty(), state_version).0; + let prefixed_key = child_info.prefixed_storage_key(); + backend.insert( + vec![(None, vec![(prefixed_key.to_vec(), Some(child_root.encode()))])], + state_version, + ); + + let root = *backend.root(); + + // Prove child trie keys + let child_keys: Vec<_> = child_data.iter().map(|(k, _)| k.clone()).collect(); + let child_proof = sp_state_machine::prove_child_read_on_trie_backend(&backend, &child_info, child_keys) + .expect("prove child read"); + + // Prove child root in main trie + let main_proof = sp_state_machine::prove_read_on_trie_backend(&backend, vec![prefixed_key.to_vec()]) + .expect("prove read"); + + // Merge proofs + let proof = StorageProof::merge(vec![main_proof, child_proof]); + + RelayChainStateProof::new(ParaId::from(100), root, proof).expect("valid proof") +} diff --git a/cumulus/pallets/subscriber/src/tests.rs b/cumulus/pallets/subscriber/src/tests.rs new file mode 100644 index 0000000000000..2fd59934f039e --- /dev/null +++ b/cumulus/pallets/subscriber/src/tests.rs @@ -0,0 +1,143 @@ +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +#![cfg(test)] + +use super::*; +use crate::mock::*; +use codec::Encode; +use cumulus_primitives_core::ParaId; +use frame_support::{assert_ok, traits::Get}; + +#[test] +fn process_child_trie_data_with_new_data_calls_handler() { + new_test_ext().execute_with(|| { + ReceivedData::set(vec![]); + let publisher = ParaId::from(1000); + let key = vec![0x12, 0x34]; + let value = vec![0xAA, 0xBB].encode(); + + TestSubscriptions::set(vec![(publisher, vec![key.clone()])]); + + let proof = build_sproof_with_child_data(publisher, vec![(key.clone(), value.clone())]); + + Pallet::::process_child_trie_data(&proof); + + let received = ReceivedData::get(); + assert_eq!(received.len(), 1); + assert_eq!(received[0].0, publisher); + assert_eq!(received[0].1, key); + assert_eq!(received[0].2, Vec::::decode(&mut &value[..]).unwrap()); + }); +} + +#[test] +fn process_empty_subscriptions() { + new_test_ext().execute_with(|| { + ReceivedData::set(vec![]); + TestSubscriptions::set(vec![]); + + let proof = build_sproof_with_child_data(ParaId::from(1000), vec![]); + + Pallet::::process_child_trie_data(&proof); + + assert_eq!(ReceivedData::get().len(), 0); + }); +} + +#[test] +fn root_change_triggers_processing() { + new_test_ext().execute_with(|| { + ReceivedData::set(vec![]); + let publisher = ParaId::from(1000); + let key = vec![0x01]; + let value1 = vec![0x11].encode(); + let value2 = vec![0x22].encode(); + + TestSubscriptions::set(vec![(publisher, vec![key.clone()])]); + + // First block + let proof1 = build_sproof_with_child_data(publisher, vec![(key.clone(), value1.clone())]); + Pallet::::process_child_trie_data(&proof1); + assert_eq!(ReceivedData::get().len(), 1); + + // Second block with different value (root changed) + ReceivedData::set(vec![]); + let proof2 = build_sproof_with_child_data(publisher, vec![(key.clone(), value2.clone())]); + Pallet::::process_child_trie_data(&proof2); + + assert_eq!(ReceivedData::get().len(), 1); + 
assert_eq!(ReceivedData::get()[0].2, Vec::::decode(&mut &value2[..]).unwrap()); + }); +} + +#[test] +fn unchanged_root_skips_processing() { + new_test_ext().execute_with(|| { + ReceivedData::set(vec![]); + let publisher = ParaId::from(1000); + let key = vec![0x01]; + let value = vec![0x11].encode(); + + TestSubscriptions::set(vec![(publisher, vec![key.clone()])]); + + // First block + let proof = build_sproof_with_child_data(publisher, vec![(key.clone(), value.clone())]); + Pallet::::process_child_trie_data(&proof); + assert_eq!(ReceivedData::get().len(), 1); + + // Second block with same data (unchanged root) + ReceivedData::set(vec![]); + let proof2 = build_sproof_with_child_data(publisher, vec![(key.clone(), value)]); + Pallet::::process_child_trie_data(&proof2); + + assert_eq!(ReceivedData::get().len(), 0, "Handler should not be called for unchanged root"); + }); +} + +#[test] +fn clear_stored_roots_extrinsic() { + new_test_ext().execute_with(|| { + let publisher = ParaId::from(1000); + TestSubscriptions::set(vec![(publisher, vec![vec![0x01]])]); + + // Store some roots + let proof = build_sproof_with_child_data(publisher, vec![(vec![0x01], vec![0x11].encode())]); + Pallet::::process_child_trie_data(&proof); + + assert!(!PreviousPublishedDataRoots::::get().is_empty()); + + // Clear roots + assert_ok!(Pallet::::clear_stored_roots(frame_system::RawOrigin::Root.into())); + + assert!(PreviousPublishedDataRoots::::get().is_empty()); + }); +} + +#[test] +fn data_processed_event_emitted() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + + let publisher = ParaId::from(1000); + let key = vec![0x12]; + let value = vec![0xAA].encode(); + + TestSubscriptions::set(vec![(publisher, vec![key.clone()])]); + + let proof = build_sproof_with_child_data(publisher, vec![(key.clone(), value.clone())]); + Pallet::::process_child_trie_data(&proof); + + // value_size is the decoded Vec length, not the encoded length + let decoded_len = Vec::::decode(&mut 
&value[..]).unwrap().len() as u32; + + System::assert_has_event( + Event::DataProcessed { + publisher, + key: key.try_into().unwrap(), + value_size: decoded_len, + } + .into(), + ); + }); +} From 2a961e43c312657614b456d315d1bdc50546895a Mon Sep 17 00:00:00 2001 From: metricaez Date: Fri, 12 Dec 2025 10:37:20 -0300 Subject: [PATCH 25/69] chore: better RelayStorageKey comments and naming --- cumulus/client/parachain-inherent/src/lib.rs | 4 ++-- cumulus/pallets/subscriber/src/lib.rs | 13 ++++++++----- cumulus/primitives/core/src/lib.rs | 15 +++++++++------ 3 files changed, 19 insertions(+), 13 deletions(-) diff --git a/cumulus/client/parachain-inherent/src/lib.rs b/cumulus/client/parachain-inherent/src/lib.rs index 9e96446b34d96..8ea3c4f96c5ae 100644 --- a/cumulus/client/parachain-inherent/src/lib.rs +++ b/cumulus/client/parachain-inherent/src/lib.rs @@ -189,8 +189,8 @@ async fn collect_relay_storage_proofs( for key in keys { match key { RelayStorageKey::Top(k) => top_keys.push(k), - RelayStorageKey::Child { info, key } => { - child_keys.entry(info).or_default().push(key); + RelayStorageKey::Child { storage_key, key } => { + child_keys.entry(storage_key).or_default().push(key); }, } } } diff --git a/cumulus/pallets/subscriber/src/lib.rs b/cumulus/pallets/subscriber/src/lib.rs index d1ebfddca3b17..fe8e639fe871f 100644 --- a/cumulus/pallets/subscriber/src/lib.rs +++ b/cumulus/pallets/subscriber/src/lib.rs @@ -122,11 +122,10 @@ pub mod pallet { let storage_keys = T::SubscriptionHandler::subscriptions() .into_iter() .flat_map(|(para_id, data_keys)| { - let child_info = Self::derive_child_info(para_id); - let storage_key = child_info.storage_key().to_vec(); + let storage_key = Self::derive_storage_key(para_id); data_keys.into_iter().map(move |key| { cumulus_primitives_core::RelayStorageKey::Child { - info: storage_key.clone(), + storage_key: storage_key.clone(), key, } }) @@ -136,9 +135,13 @@ cumulus_primitives_core::RelayProofRequest { keys: storage_keys } 
} - fn derive_child_info(publisher_para_id: ParaId) -> sp_core::storage::ChildInfo { + fn derive_storage_key(publisher_para_id: ParaId) -> Vec { use codec::Encode; - sp_core::storage::ChildInfo::new_default(&(b"pubsub", publisher_para_id).encode()) + (b"pubsub", publisher_para_id).encode() + } + + fn derive_child_info(publisher_para_id: ParaId) -> sp_core::storage::ChildInfo { + sp_core::storage::ChildInfo::new_default(&Self::derive_storage_key(publisher_para_id)) } fn collect_publisher_roots( diff --git a/cumulus/primitives/core/src/lib.rs b/cumulus/primitives/core/src/lib.rs index 9ebc92daa6ffb..58dc02fd75279 100644 --- a/cumulus/primitives/core/src/lib.rs +++ b/cumulus/primitives/core/src/lib.rs @@ -473,8 +473,10 @@ pub enum RelayStorageKey { Top(Vec), /// Child trie storage key. Child { - /// Child trie storage key (unprefixed). - info: Vec, + /// Unprefixed storage key identifying the child trie root location. + /// Prefix `:child_storage:default:` is added when accessing storage. + /// Used to derive `ChildInfo` for reading child trie data. + storage_key: Vec, /// Key within the child trie. key: Vec, }, @@ -545,10 +547,11 @@ sp_api::decl_runtime_apis! { pub trait KeyToIncludeInRelayProofApi { /// Returns relay chain storage proof requests. /// - /// The returned structure specifies: - /// - `top`: Top-level relay chain storage keys to include in the proof - /// - `children_default`: Child trie storage to include, with each entry containing - /// the child trie identifier and the keys to prove from that child trie + /// The returned `RelayProofRequest` contains a list of storage keys where each key + /// can be either: + /// - `RelayStorageKey::Top`: Top-level relay chain storage key + /// - `RelayStorageKey::Child`: Child trie storage, containing the child trie identifier + /// and the key to prove from that child trie /// /// The collator generates proofs for these and includes them in the relay chain state proof. 
fn keys_to_prove() -> RelayProofRequest; From 8c91f58a9099dc9fed4aaf7da85961deac54ea17 Mon Sep 17 00:00:00 2001 From: metricaez Date: Fri, 12 Dec 2025 10:52:27 -0300 Subject: [PATCH 26/69] choir: ProcessChildTrieData to RelayProofKeysProcessor for better naming --- cumulus/pallets/aura-ext/src/test.rs | 2 +- cumulus/pallets/parachain-system/src/lib.rs | 13 +++++++------ cumulus/pallets/parachain-system/src/mock.rs | 2 +- .../parachain-system/src/relay_state_snapshot.rs | 14 ++++++++------ cumulus/pallets/subscriber/src/lib.rs | 10 +++++++--- cumulus/pallets/xcmp-queue/src/mock.rs | 2 +- .../runtimes/assets/asset-hub-rococo/src/lib.rs | 2 +- .../runtimes/assets/asset-hub-westend/src/lib.rs | 2 +- .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 2 +- .../bridge-hubs/bridge-hub-westend/src/lib.rs | 2 +- .../collectives/collectives-westend/src/lib.rs | 2 +- .../runtimes/coretime/coretime-westend/src/lib.rs | 2 +- .../runtimes/glutton/glutton-westend/src/lib.rs | 2 +- .../runtimes/people/people-westend/src/lib.rs | 2 +- .../parachains/runtimes/testing/penpal/src/lib.rs | 2 +- .../testing/yet-another-parachain/src/lib.rs | 2 +- cumulus/test/runtime/src/lib.rs | 2 +- 17 files changed, 36 insertions(+), 29 deletions(-) diff --git a/cumulus/pallets/aura-ext/src/test.rs b/cumulus/pallets/aura-ext/src/test.rs index c8ffe91eb9417..3486e56a5c2e4 100644 --- a/cumulus/pallets/aura-ext/src/test.rs +++ b/cumulus/pallets/aura-ext/src/test.rs @@ -151,7 +151,7 @@ impl cumulus_pallet_parachain_system::Config for Test { type CheckAssociatedRelayNumber = AnyRelayNumber; type ConsensusHook = ExpectParentIncluded; type RelayParentOffset = ConstU32<0>; - type ChildTrieProcessor = (); + type RelayProofKeysProcessor = (); } fn set_ancestors() { diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index d31a2b46be48f..ed55293a88db0 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -85,7 
+85,7 @@ use unincluded_segment::{ }; pub use consensus_hook::{ConsensusHook, ExpectParentIncluded}; -pub use relay_state_snapshot::ProcessChildTrieData; +pub use relay_state_snapshot::ProcessRelayProofKeys; /// Register the `validate_block` function that is used by parachains to validate blocks on a /// validator. /// @@ -265,11 +265,12 @@ pub mod pallet { /// If set to 0, this config has no impact. type RelayParentOffset: Get; - /// Processor for child trie data from relay chain state proofs. + /// Processor for relay chain proof keys. /// - /// This allows parachains to process published data from other parachains - /// by reading it from child tries included in the relay chain state proof. - type ChildTrieProcessor: relay_state_snapshot::ProcessChildTrieData; + /// This allows parachains to process data from the relay chain state proof, + /// including both child trie keys and main trie keys that were requested + /// via `KeyToIncludeInRelayProofApi`. + type RelayProofKeysProcessor: relay_state_snapshot::ProcessRelayProofKeys; } #[pallet::hooks] @@ -708,7 +709,7 @@ pub mod pallet { >::put(relevant_messaging_state.clone()); >::put(host_config); - total_weight.saturating_accrue(T::ChildTrieProcessor::process_child_trie_data(&relay_state_proof)); + total_weight.saturating_accrue(T::RelayProofKeysProcessor::process_relay_proof_keys(&relay_state_proof)); ::on_validation_data(&vfp); diff --git a/cumulus/pallets/parachain-system/src/mock.rs b/cumulus/pallets/parachain-system/src/mock.rs index 720de35b9766c..b361031be2c37 100644 --- a/cumulus/pallets/parachain-system/src/mock.rs +++ b/cumulus/pallets/parachain-system/src/mock.rs @@ -99,7 +99,7 @@ impl Config for Test { type ConsensusHook = TestConsensusHook; type WeightInfo = (); type RelayParentOffset = ConstU32<0>; - type ChildTrieProcessor = (); + type RelayProofKeysProcessor = (); } std::thread_local! 
{ diff --git a/cumulus/pallets/parachain-system/src/relay_state_snapshot.rs b/cumulus/pallets/parachain-system/src/relay_state_snapshot.rs index f7c1808c1c212..7c6efb5ddf73e 100644 --- a/cumulus/pallets/parachain-system/src/relay_state_snapshot.rs +++ b/cumulus/pallets/parachain-system/src/relay_state_snapshot.rs @@ -27,14 +27,16 @@ use sp_runtime::traits::HashingFor; use sp_state_machine::{Backend, TrieBackend, TrieBackendBuilder}; use sp_trie::{HashDBT, MemoryDB, StorageProof, EMPTY_PREFIX}; -/// Process child trie data from verified relay chain state proofs. -pub trait ProcessChildTrieData { - /// Process child trie data from a verified relay state proof. - fn process_child_trie_data(verified_proof: &RelayChainStateProof) -> Weight; +/// Process keys from verified relay chain state proofs. +/// +/// This trait allows processing of relay chain storage data from the verified proof. +pub trait ProcessRelayProofKeys { + /// Process keys from a verified relay state proof. + fn process_relay_proof_keys(verified_proof: &RelayChainStateProof) -> Weight; } -impl ProcessChildTrieData for () { - fn process_child_trie_data(_verified_proof: &RelayChainStateProof) -> Weight { +impl ProcessRelayProofKeys for () { + fn process_relay_proof_keys(_verified_proof: &RelayChainStateProof) -> Weight { Weight::zero() } } diff --git a/cumulus/pallets/subscriber/src/lib.rs b/cumulus/pallets/subscriber/src/lib.rs index fe8e639fe871f..19969e58e69e4 100644 --- a/cumulus/pallets/subscriber/src/lib.rs +++ b/cumulus/pallets/subscriber/src/lib.rs @@ -34,7 +34,7 @@ use sp_std::vec; pub use pallet::*; -pub use cumulus_pallet_parachain_system::relay_state_snapshot::ProcessChildTrieData; +pub use cumulus_pallet_parachain_system::relay_state_snapshot::ProcessRelayProofKeys; mod mock; #[cfg(test)] @@ -251,8 +251,12 @@ pub mod pallet { } } - impl ProcessChildTrieData for Pallet { - fn process_child_trie_data(verified_proof: &RelayChainStateProof) -> Weight { + impl ProcessRelayProofKeys for Pallet { + 
/// Process child trie data from the relay proof. + /// + /// Note: This implementation only processes child trie keys (pubsub data). + /// Main trie keys in the proof are intentionally ignored. + fn process_relay_proof_keys(verified_proof: &RelayChainStateProof) -> Weight { let current_roots = Self::collect_publisher_roots(verified_proof); Self::process_published_data(verified_proof, ¤t_roots) } diff --git a/cumulus/pallets/xcmp-queue/src/mock.rs b/cumulus/pallets/xcmp-queue/src/mock.rs index dc20f23098b28..1e32c9003a948 100644 --- a/cumulus/pallets/xcmp-queue/src/mock.rs +++ b/cumulus/pallets/xcmp-queue/src/mock.rs @@ -106,7 +106,7 @@ impl cumulus_pallet_parachain_system::Config for Test { type CheckAssociatedRelayNumber = AnyRelayNumber; type ConsensusHook = cumulus_pallet_parachain_system::consensus_hook::ExpectParentIncluded; type RelayParentOffset = ConstU32<0>; - type ChildTrieProcessor = (); + type RelayProofKeysProcessor = (); } parameter_types! { diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index c8b8ac2d3327f..720ebe8ee2b4b 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -745,7 +745,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32<0>; - type ChildTrieProcessor = (); + type RelayProofKeysProcessor = (); } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index 73e8f0a2bcd87..f0cb527c42f09 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -903,7 
+903,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32<0>; - type ChildTrieProcessor = (); + type RelayProofKeysProcessor = (); } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index 4559bfc9d8577..dac8fc398127b 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -400,7 +400,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32<0>; - type ChildTrieProcessor = (); + type RelayProofKeysProcessor = (); } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index b8bdf256cb5d1..625c2ebe24507 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -392,7 +392,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32<0>; - type ChildTrieProcessor = (); + type RelayProofKeysProcessor = (); } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index 6f6c1d59a114d..37edc0f02329c 100644 --- 
a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -423,7 +423,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32<0>; - type ChildTrieProcessor = (); + type RelayProofKeysProcessor = (); } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index eadb74b61bfde..fb72ae7a59586 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -305,7 +305,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32<0>; - type ChildTrieProcessor = (); + type RelayProofKeysProcessor = (); } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs index b2b91a856704a..98131ac4ef58a 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs @@ -191,7 +191,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type ConsensusHook = ConsensusHook; type WeightInfo = weights::cumulus_pallet_parachain_system::WeightInfo; type RelayParentOffset = ConstU32<0>; - type ChildTrieProcessor = (); + type RelayProofKeysProcessor = (); } parameter_types! 
{ diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index 48b98a77a3b3d..427ff1454d8fd 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -281,7 +281,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type ConsensusHook = ConsensusHook; type WeightInfo = weights::cumulus_pallet_parachain_system::WeightInfo; type RelayParentOffset = ConstU32<0>; - type ChildTrieProcessor = (); + type RelayProofKeysProcessor = (); } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs index 20a5c8a5909db..4d1ee6a0d2254 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs @@ -664,7 +664,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { >; type RelayParentOffset = ConstU32<0>; - type ChildTrieProcessor = (); + type RelayProofKeysProcessor = (); } impl parachain_info::Config for Runtime {} diff --git a/cumulus/parachains/runtimes/testing/yet-another-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/yet-another-parachain/src/lib.rs index cab8dbe1c326c..1c5ead9e7b90b 100644 --- a/cumulus/parachains/runtimes/testing/yet-another-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/yet-another-parachain/src/lib.rs @@ -368,7 +368,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32; - type ChildTrieProcessor = (); + type RelayProofKeysProcessor = (); } impl pallet_message_queue::Config for Runtime { diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index 
40b49a7feeb0e..c20113aa69b24 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -385,7 +385,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32; - type ChildTrieProcessor = (); + type RelayProofKeysProcessor = (); } impl parachain_info::Config for Runtime {} From 6ed71cba791baa060b60d861486a72a2a49e6765 Mon Sep 17 00:00:00 2001 From: metricaez Date: Fri, 12 Dec 2025 11:07:09 -0300 Subject: [PATCH 27/69] choir: better imports --- cumulus/client/consensus/aura/src/collators/lookahead.rs | 6 +++--- cumulus/pallets/subscriber/src/lib.rs | 6 +++--- cumulus/polkadot-omni-node/lib/src/common/aura.rs | 5 +++-- 3 files changed, 9 insertions(+), 8 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/lookahead.rs b/cumulus/client/consensus/aura/src/collators/lookahead.rs index 7d682ce0811f8..c33ce1c41d6c9 100644 --- a/cumulus/client/consensus/aura/src/collators/lookahead.rs +++ b/cumulus/client/consensus/aura/src/collators/lookahead.rs @@ -36,7 +36,7 @@ use codec::{Codec, Encode}; use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; use cumulus_client_consensus_common::{self as consensus_common, ParachainBlockImportMarker}; use cumulus_primitives_aura::AuraUnincludedSegmentApi; -use cumulus_primitives_core::{CollectCollationInfo, PersistedValidationData}; +use cumulus_primitives_core::{CollectCollationInfo, KeyToIncludeInRelayProofApi, PersistedValidationData}; use cumulus_relay_chain_interface::RelayChainInterface; use sp_consensus::Environment; @@ -167,7 +167,7 @@ where Client::Api: AuraApi + CollectCollationInfo + AuraUnincludedSegmentApi - + cumulus_primitives_core::KeyToIncludeInRelayProofApi, + + KeyToIncludeInRelayProofApi, Backend: sc_client_api::Backend + 'static, RClient: RelayChainInterface + Clone + 'static, CIDP: 
CreateInherentDataProviders + 'static, @@ -221,7 +221,7 @@ where Client::Api: AuraApi + CollectCollationInfo + AuraUnincludedSegmentApi - + cumulus_primitives_core::KeyToIncludeInRelayProofApi, + + KeyToIncludeInRelayProofApi, Backend: sc_client_api::Backend + 'static, RClient: RelayChainInterface + Clone + 'static, CIDP: CreateInherentDataProviders + 'static, diff --git a/cumulus/pallets/subscriber/src/lib.rs b/cumulus/pallets/subscriber/src/lib.rs index 19969e58e69e4..a513683d5c42b 100644 --- a/cumulus/pallets/subscriber/src/lib.rs +++ b/cumulus/pallets/subscriber/src/lib.rs @@ -22,7 +22,9 @@ extern crate alloc; use alloc::{collections::btree_map::BTreeMap, vec::Vec}; use codec::Decode; -use cumulus_pallet_parachain_system::relay_state_snapshot::RelayChainStateProof; +use cumulus_pallet_parachain_system::relay_state_snapshot::{ + ProcessRelayProofKeys, RelayChainStateProof, +}; use cumulus_primitives_core::ParaId; use frame_support::{ defensive, @@ -34,8 +36,6 @@ use sp_std::vec; pub use pallet::*; -pub use cumulus_pallet_parachain_system::relay_state_snapshot::ProcessRelayProofKeys; - mod mock; #[cfg(test)] mod tests; diff --git a/cumulus/polkadot-omni-node/lib/src/common/aura.rs b/cumulus/polkadot-omni-node/lib/src/common/aura.rs index 49630ae33033e..10a631306b33a 100644 --- a/cumulus/polkadot-omni-node/lib/src/common/aura.rs +++ b/cumulus/polkadot-omni-node/lib/src/common/aura.rs @@ -18,6 +18,7 @@ use codec::Codec; use cumulus_primitives_aura::AuraUnincludedSegmentApi; +use cumulus_primitives_core::KeyToIncludeInRelayProofApi; use sp_consensus_aura::AuraApi; use sp_runtime::{ app_crypto::{AppCrypto, AppPair, AppSignature, Pair}, @@ -53,7 +54,7 @@ pub trait AuraRuntimeApi: sp_api::ApiExt + AuraApi::Public> + AuraUnincludedSegmentApi - + cumulus_primitives_core::KeyToIncludeInRelayProofApi + + KeyToIncludeInRelayProofApi + Sized { /// Check if the runtime has the Aura API. 
@@ -67,6 +68,6 @@ impl AuraRuntimeApi for T wher T: sp_api::ApiExt + AuraApi::Public> + AuraUnincludedSegmentApi - + cumulus_primitives_core::KeyToIncludeInRelayProofApi + + KeyToIncludeInRelayProofApi { } From cbeb13b3dfa12b50d80630580638ec5ee172ead6 Mon Sep 17 00:00:00 2001 From: metricaez Date: Fri, 12 Dec 2025 11:17:37 -0300 Subject: [PATCH 28/69] fix: Subscriber test --- cumulus/pallets/subscriber/src/tests.rs | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/cumulus/pallets/subscriber/src/tests.rs b/cumulus/pallets/subscriber/src/tests.rs index 2fd59934f039e..8559895f87d5f 100644 --- a/cumulus/pallets/subscriber/src/tests.rs +++ b/cumulus/pallets/subscriber/src/tests.rs @@ -6,11 +6,12 @@ use super::*; use crate::mock::*; use codec::Encode; +use cumulus_pallet_parachain_system::relay_state_snapshot::ProcessRelayProofKeys; use cumulus_primitives_core::ParaId; -use frame_support::{assert_ok, traits::Get}; +use frame_support::assert_ok; #[test] -fn process_child_trie_data_with_new_data_calls_handler() { +fn process_relay_proof_keys_with_new_data_calls_handler() { new_test_ext().execute_with(|| { ReceivedData::set(vec![]); let publisher = ParaId::from(1000); @@ -21,7 +22,7 @@ fn process_child_trie_data_with_new_data_calls_handler() { let proof = build_sproof_with_child_data(publisher, vec![(key.clone(), value.clone())]); - Pallet::::process_child_trie_data(&proof); + Pallet::::process_relay_proof_keys(&proof); let received = ReceivedData::get(); assert_eq!(received.len(), 1); @@ -39,7 +40,7 @@ fn process_empty_subscriptions() { let proof = build_sproof_with_child_data(ParaId::from(1000), vec![]); - Pallet::::process_child_trie_data(&proof); + Pallet::::process_relay_proof_keys(&proof); assert_eq!(ReceivedData::get().len(), 0); }); @@ -58,13 +59,13 @@ fn root_change_triggers_processing() { // First block let proof1 = build_sproof_with_child_data(publisher, vec![(key.clone(), value1.clone())]); - 
Pallet::::process_child_trie_data(&proof1); + Pallet::::process_relay_proof_keys(&proof1); assert_eq!(ReceivedData::get().len(), 1); // Second block with different value (root changed) ReceivedData::set(vec![]); let proof2 = build_sproof_with_child_data(publisher, vec![(key.clone(), value2.clone())]); - Pallet::::process_child_trie_data(&proof2); + Pallet::::process_relay_proof_keys(&proof2); assert_eq!(ReceivedData::get().len(), 1); assert_eq!(ReceivedData::get()[0].2, Vec::::decode(&mut &value2[..]).unwrap()); @@ -83,13 +84,13 @@ fn unchanged_root_skips_processing() { // First block let proof = build_sproof_with_child_data(publisher, vec![(key.clone(), value.clone())]); - Pallet::::process_child_trie_data(&proof); + Pallet::::process_relay_proof_keys(&proof); assert_eq!(ReceivedData::get().len(), 1); // Second block with same data (unchanged root) ReceivedData::set(vec![]); let proof2 = build_sproof_with_child_data(publisher, vec![(key.clone(), value)]); - Pallet::::process_child_trie_data(&proof2); + Pallet::::process_relay_proof_keys(&proof2); assert_eq!(ReceivedData::get().len(), 0, "Handler should not be called for unchanged root"); }); @@ -103,7 +104,7 @@ fn clear_stored_roots_extrinsic() { // Store some roots let proof = build_sproof_with_child_data(publisher, vec![(vec![0x01], vec![0x11].encode())]); - Pallet::::process_child_trie_data(&proof); + Pallet::::process_relay_proof_keys(&proof); assert!(!PreviousPublishedDataRoots::::get().is_empty()); @@ -126,7 +127,7 @@ fn data_processed_event_emitted() { TestSubscriptions::set(vec![(publisher, vec![key.clone()])]); let proof = build_sproof_with_child_data(publisher, vec![(key.clone(), value.clone())]); - Pallet::::process_child_trie_data(&proof); + Pallet::::process_relay_proof_keys(&proof); // value_size is the decoded Vec length, not the encoded length let decoded_len = Vec::::decode(&mut &value[..]).unwrap().len() as u32; From 9dc977629281b5ff80ff8d4cacfe177104e13123 Mon Sep 17 00:00:00 2001 From: 
metricaez Date: Fri, 12 Dec 2025 11:18:18 -0300 Subject: [PATCH 29/69] choir: unnecesary import --- cumulus/pallets/subscriber/src/tests.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/cumulus/pallets/subscriber/src/tests.rs b/cumulus/pallets/subscriber/src/tests.rs index 8559895f87d5f..d945d03185616 100644 --- a/cumulus/pallets/subscriber/src/tests.rs +++ b/cumulus/pallets/subscriber/src/tests.rs @@ -6,7 +6,6 @@ use super::*; use crate::mock::*; use codec::Encode; -use cumulus_pallet_parachain_system::relay_state_snapshot::ProcessRelayProofKeys; use cumulus_primitives_core::ParaId; use frame_support::assert_ok; From cb07bcf15209092553713ebb3592d920caac62ed Mon Sep 17 00:00:00 2001 From: metricaez Date: Fri, 12 Dec 2025 13:11:22 -0300 Subject: [PATCH 30/69] feat: slight generalization of Subscriber --- cumulus/pallets/subscriber/src/lib.rs | 11 ++++++++++- cumulus/pallets/subscriber/src/mock.rs | 7 ++++++- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/cumulus/pallets/subscriber/src/lib.rs b/cumulus/pallets/subscriber/src/lib.rs index a513683d5c42b..7a9d1cfa59f6b 100644 --- a/cumulus/pallets/subscriber/src/lib.rs +++ b/cumulus/pallets/subscriber/src/lib.rs @@ -17,6 +17,10 @@ #![cfg_attr(not(feature = "std"), no_std)] //! Process child trie data from relay chain state proofs via configurable handler. +//! +//! This pallet is heavily opinionated toward a parachain-to-parachain publish-subscribe model. +//! It assumes ParaId as the identifier for each child trie and is designed specifically for +//! extracting published data from relay chain proofs in a pubsub mechanism. extern crate alloc; @@ -59,8 +63,13 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { + /// Handler for defining subscriptions and processing received data. type SubscriptionHandler: SubscriptionHandler; + /// Weight information for extrinsics and operations. 
type WeightInfo: WeightInfo; + /// Prefix for the child trie storage key used to identify published data. + #[pallet::constant] + type ChildTriePrefix: Get<&'static [u8]>; } /// Child trie roots from previous block for change detection. @@ -137,7 +146,7 @@ pub mod pallet { fn derive_storage_key(publisher_para_id: ParaId) -> Vec { use codec::Encode; - (b"pubsub", publisher_para_id).encode() + (T::ChildTriePrefix::get(), publisher_para_id).encode() } fn derive_child_info(publisher_para_id: ParaId) -> sp_core::storage::ChildInfo { diff --git a/cumulus/pallets/subscriber/src/mock.rs b/cumulus/pallets/subscriber/src/mock.rs index 86a26737bef00..65a4233062489 100644 --- a/cumulus/pallets/subscriber/src/mock.rs +++ b/cumulus/pallets/subscriber/src/mock.rs @@ -43,9 +43,14 @@ impl SubscriptionHandler for TestHandler { } } +parameter_types! { + pub const TestChildTriePrefix: &'static [u8] = b"pubsub"; +} + impl crate::Config for Test { type SubscriptionHandler = TestHandler; type WeightInfo = (); + type ChildTriePrefix = TestChildTriePrefix; } pub fn new_test_ext() -> sp_io::TestExternalities { @@ -65,7 +70,7 @@ pub fn build_sproof_with_child_data( let mut backend = TrieBackendBuilder::new(db, root).build(); // Derive child info same way as pallet - let child_info = sp_core::storage::ChildInfo::new_default(&(b"pubsub", publisher_para_id).encode()); + let child_info = sp_core::storage::ChildInfo::new_default(&(TestChildTriePrefix::get(), publisher_para_id).encode()); // Insert child trie data let child_kv: Vec<_> = child_data.iter().map(|(k, v)| (k.clone(), Some(v.clone()))).collect(); From 485b91acd8c0e211097b45b0b2e757e17ecf1e25 Mon Sep 17 00:00:00 2001 From: metricaez Date: Fri, 12 Dec 2025 15:36:34 -0300 Subject: [PATCH 31/69] Revert "feat: slight generalization of Subscriber" This reverts commit e6cd6189e128d00eb5d2348a5d15b9597b6aa5ab. 
--- cumulus/pallets/subscriber/src/lib.rs | 11 +---------- cumulus/pallets/subscriber/src/mock.rs | 7 +------ 2 files changed, 2 insertions(+), 16 deletions(-) diff --git a/cumulus/pallets/subscriber/src/lib.rs b/cumulus/pallets/subscriber/src/lib.rs index 7a9d1cfa59f6b..a513683d5c42b 100644 --- a/cumulus/pallets/subscriber/src/lib.rs +++ b/cumulus/pallets/subscriber/src/lib.rs @@ -17,10 +17,6 @@ #![cfg_attr(not(feature = "std"), no_std)] //! Process child trie data from relay chain state proofs via configurable handler. -//! -//! This pallet is heavily opinionated toward a parachain-to-parachain publish-subscribe model. -//! It assumes ParaId as the identifier for each child trie and is designed specifically for -//! extracting published data from relay chain proofs in a pubsub mechanism. extern crate alloc; @@ -63,13 +59,8 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { - /// Handler for defining subscriptions and processing received data. type SubscriptionHandler: SubscriptionHandler; - /// Weight information for extrinsics and operations. type WeightInfo: WeightInfo; - /// Prefix for the child trie storage key used to identify published data. - #[pallet::constant] - type ChildTriePrefix: Get<&'static [u8]>; } /// Child trie roots from previous block for change detection. @@ -146,7 +137,7 @@ pub mod pallet { fn derive_storage_key(publisher_para_id: ParaId) -> Vec { use codec::Encode; - (T::ChildTriePrefix::get(), publisher_para_id).encode() + (b"pubsub", publisher_para_id).encode() } fn derive_child_info(publisher_para_id: ParaId) -> sp_core::storage::ChildInfo { diff --git a/cumulus/pallets/subscriber/src/mock.rs b/cumulus/pallets/subscriber/src/mock.rs index 65a4233062489..86a26737bef00 100644 --- a/cumulus/pallets/subscriber/src/mock.rs +++ b/cumulus/pallets/subscriber/src/mock.rs @@ -43,14 +43,9 @@ impl SubscriptionHandler for TestHandler { } } -parameter_types! 
{ - pub const TestChildTriePrefix: &'static [u8] = b"pubsub"; -} - impl crate::Config for Test { type SubscriptionHandler = TestHandler; type WeightInfo = (); - type ChildTriePrefix = TestChildTriePrefix; } pub fn new_test_ext() -> sp_io::TestExternalities { @@ -70,7 +65,7 @@ pub fn build_sproof_with_child_data( let mut backend = TrieBackendBuilder::new(db, root).build(); // Derive child info same way as pallet - let child_info = sp_core::storage::ChildInfo::new_default(&(TestChildTriePrefix::get(), publisher_para_id).encode()); + let child_info = sp_core::storage::ChildInfo::new_default(&(b"pubsub", publisher_para_id).encode()); // Insert child trie data let child_kv: Vec<_> = child_data.iter().map(|(k, v)| (k.clone(), Some(v.clone()))).collect(); From b53d823da7e4aac3d331c1b78d40f86fe8b2333b Mon Sep 17 00:00:00 2001 From: metricaez Date: Fri, 12 Dec 2025 16:26:40 -0300 Subject: [PATCH 32/69] feat: cleanup messy sub weights and comments --- cumulus/pallets/subscriber/src/lib.rs | 64 +++++++++++++------------ cumulus/pallets/subscriber/src/tests.rs | 58 ++++++++++++++++++++-- 2 files changed, 86 insertions(+), 36 deletions(-) diff --git a/cumulus/pallets/subscriber/src/lib.rs b/cumulus/pallets/subscriber/src/lib.rs index a513683d5c42b..3b73690231b52 100644 --- a/cumulus/pallets/subscriber/src/lib.rs +++ b/cumulus/pallets/subscriber/src/lib.rs @@ -17,6 +17,10 @@ #![cfg_attr(not(feature = "std"), no_std)] //! Process child trie data from relay chain state proofs via configurable handler. +//! +//! This pallet is heavily opinionated toward a parachain-to-parachain publish-subscribe model. +//! It assumes ParaId as the identifier for each child trie and is designed specifically for +//! extracting published data from relay chain proofs in a pubsub mechanism. extern crate alloc; @@ -59,7 +63,9 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { + /// Handler for defining subscriptions and processing received data. 
type SubscriptionHandler: SubscriptionHandler; + /// Weight information for extrinsics and operations. type WeightInfo: WeightInfo; } @@ -80,36 +86,40 @@ pub mod pallet { key: BoundedVec>, value_size: u32, }, - /// Stored publisher roots were cleared. - RootsCleared { count: u32 }, + /// A stored publisher root was cleared. + PublisherRootCleared { publisher: ParaId }, } #[pallet::error] pub enum Error { - /// No stored roots to clear. - NoStoredRoots, + /// Publisher root not found. + PublisherRootNotFound, } #[pallet::call] impl Pallet { - /// Clear all stored publisher roots. + /// Clear the stored root for a specific publisher. /// - /// This forces reprocessing of all subscribed data in the next block. - /// Useful for recovery scenarios or manual cache invalidation. + /// This forces reprocessing of data from that publisher in the next block. + /// Useful for recovery scenarios or when a specific publisher's data needs to be refreshed. /// /// - `origin`: Must be root. + /// - `publisher`: The ParaId of the publisher whose root should be cleared. #[pallet::call_index(0)] #[pallet::weight(T::WeightInfo::clear_stored_roots())] - pub fn clear_stored_roots(origin: OriginFor) -> DispatchResult { + pub fn clear_stored_roots( + origin: OriginFor, + publisher: ParaId, + ) -> DispatchResult { ensure_root(origin)?; - let roots = >::get(); - ensure!(!roots.is_empty(), Error::::NoStoredRoots); - - let count = roots.len() as u32; - >::kill(); + >::mutate(|roots| -> DispatchResult { + ensure!(roots.contains_key(&publisher), Error::::PublisherRootNotFound); + roots.remove(&publisher); + Ok(()) + })?; - Self::deposit_event(Event::RootsCleared { count }); + Self::deposit_event(Event::PublisherRootCleared { publisher }); Ok(()) } } @@ -135,6 +145,10 @@ pub mod pallet { cumulus_primitives_core::RelayProofRequest { keys: storage_keys } } + /// Derives the child trie storage key for a publisher. 
+ /// + /// Uses the same encoding pattern as the broadcaster pallet: + /// `(b"pubsub", para_id).encode()` to ensure compatibility. fn derive_storage_key(publisher_para_id: ParaId) -> Vec { use codec::Encode; (b"pubsub", publisher_para_id).encode() @@ -174,10 +188,6 @@ pub mod pallet { return T::DbWeight::get().reads(1); } - let mut p = 0u32; - let mut k = 0u32; - let mut v = 0u32; - let current_roots_map: BTreeMap> = current_roots.iter().map(|(para_id, root)| (*para_id, root.clone())).collect(); @@ -214,9 +224,6 @@ pub mod pallet { value_size, }); } - - v = v.max(value_size); - k += 1; }, Err(_) => { defensive!("Failed to decode published data value"); @@ -231,8 +238,6 @@ pub mod pallet { }, } } - - p += 1; } } @@ -247,7 +252,7 @@ pub mod pallet { .unwrap_or_default(); >::put(bounded_roots); - T::WeightInfo::process_published_data(p, k, v) + T::WeightInfo::process_published_data() } } @@ -264,17 +269,14 @@ pub mod pallet { } pub trait WeightInfo { - fn process_published_data(p: u32, k: u32, v: u32) -> Weight; + fn process_published_data() -> Weight; fn clear_stored_roots() -> Weight; } impl WeightInfo for () { - fn process_published_data(_p: u32, k: u32, v: u32) -> Weight { - Weight::from_parts(10_000_000, 0) - .saturating_add(Weight::from_parts(5_000 * k as u64, 0)) - .saturating_add(Weight::from_parts(100 * v as u64, 0)) - .saturating_add(frame_support::weights::constants::RocksDbWeight::get().reads(1 + k as u64)) - .saturating_add(frame_support::weights::constants::RocksDbWeight::get().writes(1)) + fn process_published_data() -> Weight { + // TODO: Replace with proper benchmarked weights + Weight::from_parts(10_000, 0) } fn clear_stored_roots() -> Weight { diff --git a/cumulus/pallets/subscriber/src/tests.rs b/cumulus/pallets/subscriber/src/tests.rs index d945d03185616..c6a352e7787b1 100644 --- a/cumulus/pallets/subscriber/src/tests.rs +++ b/cumulus/pallets/subscriber/src/tests.rs @@ -101,16 +101,64 @@ fn clear_stored_roots_extrinsic() { let publisher = 
ParaId::from(1000); TestSubscriptions::set(vec![(publisher, vec![vec![0x01]])]); - // Store some roots + // Store a root for the publisher let proof = build_sproof_with_child_data(publisher, vec![(vec![0x01], vec![0x11].encode())]); Pallet::::process_relay_proof_keys(&proof); - assert!(!PreviousPublishedDataRoots::::get().is_empty()); + // Verify root is stored + assert!(PreviousPublishedDataRoots::::get().contains_key(&publisher)); - // Clear roots - assert_ok!(Pallet::::clear_stored_roots(frame_system::RawOrigin::Root.into())); + // Clear the publisher's root + assert_ok!(Pallet::::clear_stored_roots( + frame_system::RawOrigin::Root.into(), + publisher + )); - assert!(PreviousPublishedDataRoots::::get().is_empty()); + // Verify the root was cleared + assert!(!PreviousPublishedDataRoots::::get().contains_key(&publisher)); + }); +} + +#[test] +fn clear_stored_roots_only_clears_specified_publisher() { + new_test_ext().execute_with(|| { + let publisher1 = ParaId::from(1000); + let publisher2 = ParaId::from(2000); + + // Manually set up storage with 2 publisher roots + let mut roots = BoundedBTreeMap::new(); + roots.try_insert(publisher1, BoundedVec::try_from(vec![0u8; 32]).unwrap()).unwrap(); + roots.try_insert(publisher2, BoundedVec::try_from(vec![1u8; 32]).unwrap()).unwrap(); + PreviousPublishedDataRoots::::put(roots); + + assert_eq!(PreviousPublishedDataRoots::::get().len(), 2); + + // Clear only publisher1's root + assert_ok!(Pallet::::clear_stored_roots( + frame_system::RawOrigin::Root.into(), + publisher1 + )); + + // Publisher1's root should be cleared, but publisher2's should remain + let roots = PreviousPublishedDataRoots::::get(); + assert_eq!(roots.len(), 1); + assert!(!roots.contains_key(&publisher1)); + assert!(roots.contains_key(&publisher2)); + }); +} + +#[test] +fn clear_stored_roots_fails_if_not_found() { + use frame_support::assert_noop; + + new_test_ext().execute_with(|| { + let publisher = ParaId::from(1000); + + // Try to clear root for publisher 
that doesn't exist + assert_noop!( + Pallet::::clear_stored_roots(frame_system::RawOrigin::Root.into(), publisher), + Error::::PublisherRootNotFound + ); }); } From 5a9d8394ed3e12b12bfaa128ec4648bd249d745b Mon Sep 17 00:00:00 2001 From: metricaez Date: Fri, 12 Dec 2025 17:35:17 -0300 Subject: [PATCH 33/69] feat: more Subscriber optimizations --- cumulus/pallets/subscriber/src/lib.rs | 61 ++++++++++++++------------ cumulus/pallets/subscriber/src/mock.rs | 5 +++ 2 files changed, 37 insertions(+), 29 deletions(-) diff --git a/cumulus/pallets/subscriber/src/lib.rs b/cumulus/pallets/subscriber/src/lib.rs index 3b73690231b52..8e9dd46425d0c 100644 --- a/cumulus/pallets/subscriber/src/lib.rs +++ b/cumulus/pallets/subscriber/src/lib.rs @@ -34,7 +34,7 @@ use frame_support::{ defensive, pallet_prelude::*, storage::bounded_btree_map::BoundedBTreeMap, - traits::Get, + traits::{Get, StorageVersion}, }; use sp_std::vec; @@ -58,7 +58,10 @@ pub mod pallet { use super::*; use frame_system::pallet_prelude::*; + const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); + #[pallet::pallet] + #[pallet::storage_version(STORAGE_VERSION)] pub struct Pallet(_); #[pallet::config] @@ -67,13 +70,16 @@ pub mod pallet { type SubscriptionHandler: SubscriptionHandler; /// Weight information for extrinsics and operations. type WeightInfo: WeightInfo; + /// Maximum number of publishers that can be tracked simultaneously. + #[pallet::constant] + type MaxPublishers: Get; } /// Child trie roots from previous block for change detection. #[pallet::storage] pub type PreviousPublishedDataRoots = StorageValue< _, - BoundedBTreeMap>, ConstU32<100>>, + BoundedBTreeMap>, T::MaxPublishers>, ValueQuery, >; @@ -83,7 +89,7 @@ pub mod pallet { /// Data was received and processed from a publisher. DataProcessed { publisher: ParaId, - key: BoundedVec>, + key: Vec, value_size: u32, }, /// A stored publisher root was cleared. 
@@ -113,9 +119,8 @@ pub mod pallet { ) -> DispatchResult { ensure_root(origin)?; - >::mutate(|roots| -> DispatchResult { - ensure!(roots.contains_key(&publisher), Error::::PublisherRootNotFound); - roots.remove(&publisher); + >::try_mutate(|roots| -> DispatchResult { + roots.remove(&publisher).ok_or(Error::::PublisherRootNotFound)?; Ok(()) })?; @@ -160,20 +165,20 @@ pub mod pallet { fn collect_publisher_roots( relay_state_proof: &RelayChainStateProof, + subscriptions: &[(ParaId, Vec>)], ) -> Vec<(ParaId, Vec)> { - let subscriptions = T::SubscriptionHandler::subscriptions(); - subscriptions - .into_iter() + .iter() + .take(T::MaxPublishers::get() as usize) .filter_map(|(publisher_para_id, _keys)| { - let child_info = Self::derive_child_info(publisher_para_id); + let child_info = Self::derive_child_info(*publisher_para_id); let prefixed_key = child_info.prefixed_storage_key(); relay_state_proof .read_optional_entry::<[u8; 32]>(&*prefixed_key) .ok() .flatten() - .map(|root_hash| (publisher_para_id, root_hash.to_vec())) + .map(|root_hash| (*publisher_para_id, root_hash.to_vec())) }) .collect() } @@ -181,6 +186,7 @@ pub mod pallet { fn process_published_data( relay_state_proof: &RelayChainStateProof, current_roots: &Vec<(ParaId, Vec)>, + subscriptions: &[(ParaId, Vec>)], ) -> Weight { let previous_roots = >::get(); @@ -191,19 +197,17 @@ pub mod pallet { let current_roots_map: BTreeMap> = current_roots.iter().map(|(para_id, root)| (*para_id, root.clone())).collect(); - let subscriptions = T::SubscriptionHandler::subscriptions(); - for (publisher, subscription_keys) in subscriptions { - let should_update = match previous_roots.get(&publisher) { - Some(prev_root) => match current_roots_map.get(&publisher) { + let should_update = match previous_roots.get(publisher) { + Some(prev_root) => match current_roots_map.get(publisher) { Some(curr_root) if prev_root == curr_root => false, _ => true, }, None => true, }; - if should_update && current_roots_map.contains_key(&publisher) 
{ - let child_info = Self::derive_child_info(publisher); + if should_update && current_roots_map.contains_key(publisher) { + let child_info = Self::derive_child_info(*publisher); for key in subscription_keys.iter() { match relay_state_proof.read_child_storage(&child_info, key) { @@ -212,18 +216,16 @@ pub mod pallet { Ok(value) => { let value_size = value.len() as u32; T::SubscriptionHandler::on_data_updated( - publisher, + *publisher, key.clone(), value.clone(), ); - if let Ok(bounded_key) = BoundedVec::try_from(key.clone()) { - Self::deposit_event(Event::DataProcessed { - publisher, - key: bounded_key, - value_size, - }); - } + Self::deposit_event(Event::DataProcessed { + publisher: *publisher, + key: key.clone(), + value_size, + }); }, Err(_) => { defensive!("Failed to decode published data value"); @@ -241,7 +243,7 @@ pub mod pallet { } } - let bounded_roots: BoundedBTreeMap>, ConstU32<100>> = + let bounded_roots: BoundedBTreeMap>, T::MaxPublishers> = current_roots_map .into_iter() .filter_map(|(para_id, root)| { @@ -249,7 +251,7 @@ pub mod pallet { }) .collect::>() .try_into() - .unwrap_or_default(); + .expect("MaxPublishers limit enforced in collect_publisher_roots; qed"); >::put(bounded_roots); T::WeightInfo::process_published_data() @@ -262,8 +264,9 @@ pub mod pallet { /// Note: This implementation only processes child trie keys (pubsub data). /// Main trie keys in the proof are intentionally ignored. 
fn process_relay_proof_keys(verified_proof: &RelayChainStateProof) -> Weight { - let current_roots = Self::collect_publisher_roots(verified_proof); - Self::process_published_data(verified_proof, &current_roots) + let subscriptions = T::SubscriptionHandler::subscriptions(); + let current_roots = Self::collect_publisher_roots(verified_proof, &subscriptions); + Self::process_published_data(verified_proof, &current_roots, &subscriptions) } } } diff --git a/cumulus/pallets/subscriber/src/mock.rs b/cumulus/pallets/subscriber/src/mock.rs index 86a26737bef00..b39a52c5968c3 100644 --- a/cumulus/pallets/subscriber/src/mock.rs +++ b/cumulus/pallets/subscriber/src/mock.rs @@ -43,9 +43,14 @@ impl SubscriptionHandler for TestHandler { } } +parameter_types! { + pub const MaxPublishers: u32 = 100; +} + impl crate::Config for Test { type SubscriptionHandler = TestHandler; type WeightInfo = (); + type MaxPublishers = MaxPublishers; } pub fn new_test_ext() -> sp_io::TestExternalities { From 3c6fb0775b048d79793751bab2ad092bd5cc5aef Mon Sep 17 00:00:00 2001 From: metricaez Date: Fri, 12 Dec 2025 18:27:22 -0300 Subject: [PATCH 34/69] chore: Subscriber comments and optimization --- cumulus/pallets/subscriber/src/lib.rs | 88 +++++++++++++++------------ 1 file changed, 48 insertions(+), 40 deletions(-) diff --git a/cumulus/pallets/subscriber/src/lib.rs b/cumulus/pallets/subscriber/src/lib.rs index 8e9dd46425d0c..72fb84cb616a8 100644 --- a/cumulus/pallets/subscriber/src/lib.rs +++ b/cumulus/pallets/subscriber/src/lib.rs @@ -188,61 +188,69 @@ pub mod pallet { current_roots: &Vec<(ParaId, Vec)>, subscriptions: &[(ParaId, Vec>)], ) -> Weight { + // Load roots from previous block for change detection. let previous_roots = >::get(); + // Early exit if no publishers have any data. if current_roots.is_empty() && previous_roots.is_empty() { return T::DbWeight::get().reads(1); } + // Convert to map for efficient lookup by ParaId.
let current_roots_map: BTreeMap> = current_roots.iter().map(|(para_id, root)| (*para_id, root.clone())).collect(); + // Process each subscription. for (publisher, subscription_keys) in subscriptions { - let should_update = match previous_roots.get(publisher) { - Some(prev_root) => match current_roots_map.get(publisher) { - Some(curr_root) if prev_root == curr_root => false, - _ => true, - }, - None => true, - }; - - if should_update && current_roots_map.contains_key(publisher) { - let child_info = Self::derive_child_info(*publisher); - - for key in subscription_keys.iter() { - match relay_state_proof.read_child_storage(&child_info, key) { - Ok(Some(encoded_value)) => { - match Vec::::decode(&mut &encoded_value[..]) { - Ok(value) => { - let value_size = value.len() as u32; - T::SubscriptionHandler::on_data_updated( - *publisher, - key.clone(), - value.clone(), - ); - - Self::deposit_event(Event::DataProcessed { - publisher: *publisher, - key: key.clone(), - value_size, - }); - }, - Err(_) => { - defensive!("Failed to decode published data value"); - }, - } - }, - Ok(None) => { - // Key not published yet - expected - }, - Err(_) => { - defensive!("Failed to read child storage from relay chain proof"); - }, + // Check if publisher has published data in this block. + if let Some(current_root) = current_roots_map.get(publisher) { + // Detect if child trie root changed since last block. + let should_update = previous_roots + .get(publisher) + .map_or(true, |prev_root| prev_root.as_slice() != current_root.as_slice()); + + // Only process if data changed. + if should_update { + let child_info = Self::derive_child_info(*publisher); + + // Read each subscribed key from relay proof. + for key in subscription_keys.iter() { + match relay_state_proof.read_child_storage(&child_info, key) { + Ok(Some(encoded_value)) => { + match Vec::::decode(&mut &encoded_value[..]) { + Ok(value) => { + let value_size = value.len() as u32; + // Notify handler of new data. 
+ T::SubscriptionHandler::on_data_updated( + *publisher, + key.clone(), + value.clone(), + ); + + Self::deposit_event(Event::DataProcessed { + publisher: *publisher, + key: key.clone(), + value_size, + }); + }, + Err(_) => { + defensive!("Failed to decode published data value"); + }, + } + }, + Ok(None) => { + // Key not published yet - expected. + }, + Err(_) => { + defensive!("Failed to read child storage from relay chain proof"); + }, + } } } } } + // Store current roots for next block's comparison. let bounded_roots: BoundedBTreeMap>, T::MaxPublishers> = current_roots_map .into_iter() From ac992585aeac04194ed6751d92e49419affa4a2f Mon Sep 17 00:00:00 2001 From: metricaez Date: Fri, 12 Dec 2025 19:20:09 -0300 Subject: [PATCH 35/69] feat: Subscriber benchmarks first approach --- Cargo.lock | 1 + cumulus/pallets/subscriber/Cargo.toml | 22 +++ .../pallets/subscriber/src/benchmarking.rs | 184 ++++++++++++++++++ cumulus/pallets/subscriber/src/lib.rs | 47 ++++- 4 files changed, 245 insertions(+), 9 deletions(-) create mode 100644 cumulus/pallets/subscriber/src/benchmarking.rs diff --git a/Cargo.lock b/Cargo.lock index c3f17722d58ba..3089aff2c56cc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4787,6 +4787,7 @@ version = "0.1.0" dependencies = [ "cumulus-pallet-parachain-system", "cumulus-primitives-core", + "frame-benchmarking", "frame-support", "frame-system", "parity-scale-codec", diff --git a/cumulus/pallets/subscriber/Cargo.toml b/cumulus/pallets/subscriber/Cargo.toml index 4eecadef89fd6..98574da962399 100644 --- a/cumulus/pallets/subscriber/Cargo.toml +++ b/cumulus/pallets/subscriber/Cargo.toml @@ -23,6 +23,13 @@ sp-std = { workspace = true } cumulus-pallet-parachain-system = { workspace = true } cumulus-primitives-core = { workspace = true } +# Benchmarking +frame-benchmarking = { optional = true, workspace = true } +polkadot-primitives = { optional = true, workspace = true } +sp-runtime = { optional = true, workspace = true } +sp-state-machine = { optional = true, 
workspace = true } +sp-trie = { optional = true, workspace = true } + [dev-dependencies] polkadot-primitives = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } @@ -36,9 +43,24 @@ std = [ "codec/std", "cumulus-pallet-parachain-system/std", "cumulus-primitives-core/std", + "frame-benchmarking?/std", "frame-support/std", "frame-system/std", + "polkadot-primitives?/std", "scale-info/std", "sp-core/std", + "sp-runtime?/std", + "sp-state-machine?/std", "sp-std/std", + "sp-trie?/std", +] +runtime-benchmarks = [ + "cumulus-pallet-parachain-system/runtime-benchmarks", + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "polkadot-primitives", + "sp-runtime", + "sp-state-machine", + "sp-trie", ] diff --git a/cumulus/pallets/subscriber/src/benchmarking.rs b/cumulus/pallets/subscriber/src/benchmarking.rs new file mode 100644 index 0000000000000..a39a084e48e40 --- /dev/null +++ b/cumulus/pallets/subscriber/src/benchmarking.rs @@ -0,0 +1,184 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
Benchmarking setup for cumulus-pallet-subscriber + +#![cfg(feature = "runtime-benchmarks")] + +use super::*; +use crate::Pallet as Subscriber; +use codec::Encode; +use cumulus_pallet_parachain_system::RelayChainStateProof; +use cumulus_primitives_core::ParaId; +use frame_benchmarking::v2::*; +use frame_support::traits::Get; +use frame_system::RawOrigin; +use sp_runtime::{traits::HashingFor, StateVersion}; +use sp_state_machine::{Backend, TrieBackendBuilder}; +use sp_trie::PrefixedMemoryDB; + +/// Build a relay chain state proof with child trie data for multiple publishers. +fn build_test_proof( + publishers: &[(ParaId, Vec<(Vec, Vec)>)], +) -> RelayChainStateProof { + let (db, root) = PrefixedMemoryDB::>::default_with_root(); + let state_version = StateVersion::default(); + let mut backend = TrieBackendBuilder::new(db, root).build(); + + let mut all_proofs = vec![]; + let mut main_trie_updates = vec![]; + + // Process each publisher + for (publisher_para_id, child_data) in publishers { + let child_info = sp_core::storage::ChildInfo::new_default(&(b"pubsub", *publisher_para_id).encode()); + + // Insert child trie data + let child_kv: Vec<_> = child_data.iter().map(|(k, v)| (k.clone(), Some(v.clone()))).collect(); + backend.insert(vec![(Some(child_info.clone()), child_kv)], state_version); + + // Get child trie root and prepare to insert it in main trie + let child_root = backend.child_storage_root(&child_info, core::iter::empty(), state_version).0; + let prefixed_key = child_info.prefixed_storage_key(); + main_trie_updates.push((prefixed_key.to_vec(), Some(child_root.encode()))); + + // Prove child trie keys + let child_keys: Vec<_> = child_data.iter().map(|(k, _)| k.clone()).collect(); + if !child_keys.is_empty() { + let child_proof = sp_state_machine::prove_child_read_on_trie_backend(&backend, &child_info, child_keys) + .expect("prove child read"); + all_proofs.push(child_proof); + } + } + + // Insert all child roots in main trie + backend.insert(vec![(None, 
main_trie_updates.clone())], state_version); + let root = *backend.root(); + + // Prove all child roots in main trie + let main_keys: Vec<_> = main_trie_updates.iter().map(|(k, _)| k.clone()).collect(); + let main_proof = sp_state_machine::prove_read_on_trie_backend(&backend, main_keys) + .expect("prove read"); + all_proofs.push(main_proof); + + // Merge all proofs + let proof = sp_trie::StorageProof::merge(all_proofs); + + RelayChainStateProof::new(ParaId::from(100), root, proof).expect("valid proof") +} + +/// Create test subscriptions for benchmarking. +fn create_subscriptions(n: u32, keys_per_publisher: u32) -> Vec<(ParaId, Vec>)> { + (0..n) + .map(|i| { + let para_id = ParaId::from(1000 + i); + let keys: Vec> = if keys_per_publisher == 0 { + vec![vec![i as u8], vec![i as u8, i as u8]] + } else { + (0..keys_per_publisher).map(|j| vec![i as u8, j as u8]).collect() + }; + (para_id, keys) + }) + .collect() +} + +#[benchmarks] +mod benchmarks { + use super::*; + + /// Benchmark calling `SubscriptionHandler::subscriptions()`. + /// + /// Cost scales with both number of publishers `n` and keys per publisher `k`. + #[benchmark] + fn get_subscriptions( + n: Linear<1, { T::MaxPublishers::get() }>, + k: Linear<1, 10>, + ) { + let _subscriptions = create_subscriptions(n, k); + #[block] + { + let _subs = T::SubscriptionHandler::subscriptions(); + } + } + + /// Benchmark collecting publisher roots from the relay state proof. + /// + /// Cost scales with the number of publishers `n`. + #[benchmark] + fn collect_publisher_roots( + n: Linear<1, { T::MaxPublishers::get() }>, + ) { + let subscriptions = create_subscriptions(n, 1); + let publishers: Vec<_> = (0..n) + .map(|i| (ParaId::from(1000 + i), vec![(vec![i as u8], vec![25u8])])) + .collect(); + let proof = build_test_proof::(&publishers); + + #[block] + { + Subscriber::::collect_publisher_roots(&proof, &subscriptions); + } + } + + /// Benchmark processing published data from the relay proof. 
+ /// + /// Worst case: all `n` publishers have updated data with `k` keys each that need processing. + #[benchmark] + fn process_published_data( + n: Linear<1, { T::MaxPublishers::get() }>, + k: Linear<1, 10>, + ) { + let subscriptions = create_subscriptions(n, k); + let publishers: Vec<_> = (0..n) + .map(|i| { + let para_id = ParaId::from(1000 + i); + let child_data: Vec<(Vec, Vec)> = (0..k) + .map(|j| { + let value = vec![25u8; 100]; + let encoded_value = value.encode(); + (vec![i as u8, j as u8], encoded_value) + }) + .collect(); + (para_id, child_data) + }) + .collect(); + let proof = build_test_proof::(&publishers); + let current_roots = Subscriber::::collect_publisher_roots(&proof, &subscriptions); + + #[block] + { + Subscriber::::process_published_data(&proof, ¤t_roots, &subscriptions); + } + } + + #[benchmark] + fn clear_stored_roots() { + let publisher = ParaId::from(1000); + let root = BoundedVec::try_from(vec![0u8; 32]).unwrap(); + PreviousPublishedDataRoots::::mutate(|roots| { + let _ = roots.try_insert(publisher, root); + }); + + #[extrinsic_call] + _(RawOrigin::Root, publisher); + + assert!(!PreviousPublishedDataRoots::::get().contains_key(&publisher)); + } + + impl_benchmark_test_suite! 
{ + Subscriber, + crate::mock::new_test_ext(), + crate::mock::Test + } +} diff --git a/cumulus/pallets/subscriber/src/lib.rs b/cumulus/pallets/subscriber/src/lib.rs index 72fb84cb616a8..401e90883f56a 100644 --- a/cumulus/pallets/subscriber/src/lib.rs +++ b/cumulus/pallets/subscriber/src/lib.rs @@ -40,6 +40,8 @@ use sp_std::vec; pub use pallet::*; +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; mod mock; #[cfg(test)] mod tests; @@ -163,7 +165,7 @@ pub mod pallet { sp_core::storage::ChildInfo::new_default(&Self::derive_storage_key(publisher_para_id)) } - fn collect_publisher_roots( + pub fn collect_publisher_roots( relay_state_proof: &RelayChainStateProof, subscriptions: &[(ParaId, Vec>)], ) -> Vec<(ParaId, Vec)> { @@ -183,17 +185,17 @@ pub mod pallet { .collect() } - fn process_published_data( + pub fn process_published_data( relay_state_proof: &RelayChainStateProof, current_roots: &Vec<(ParaId, Vec)>, subscriptions: &[(ParaId, Vec>)], - ) -> Weight { + ) { // Load roots from previous block for change detection. let previous_roots = >::get(); // Early exit if no publishers have any data. if current_roots.is_empty() && previous_roots.is_empty() { - return T::DbWeight::get().reads(1); + return; } // Convert to map for efficient lookup by ParaId. @@ -261,8 +263,6 @@ pub mod pallet { .try_into() .expect("MaxPublishers limit enforced in collect_publisher_roots; qed"); >::put(bounded_roots); - - T::WeightInfo::process_published_data() } } @@ -273,23 +273,52 @@ pub mod pallet { /// Main trie keys in the proof are intentionally ignored. 
fn process_relay_proof_keys(verified_proof: &RelayChainStateProof) -> Weight { let subscriptions = T::SubscriptionHandler::subscriptions(); + let num_publishers = subscriptions.len() as u32; + let keys_per_publisher = subscriptions + .first() + .map(|(_, keys)| keys.len() as u32) + .unwrap_or(0); + let current_roots = Self::collect_publisher_roots(verified_proof, &subscriptions); - Self::process_published_data(verified_proof, ¤t_roots, &subscriptions) + Self::process_published_data(verified_proof, ¤t_roots, &subscriptions); + + // Return total weight for all operations + T::WeightInfo::process_relay_proof_keys(num_publishers, keys_per_publisher) } } } pub trait WeightInfo { - fn process_published_data() -> Weight; + fn get_subscriptions(n: u32, k: u32) -> Weight; + fn collect_publisher_roots(n: u32) -> Weight; + fn process_published_data(n: u32, k: u32) -> Weight; fn clear_stored_roots() -> Weight; + + /// Total weight consumed by process_relay_proof_keys + /// Composes the weights of all sub-operations + fn process_relay_proof_keys(num_publishers: u32, keys_per_publisher: u32) -> Weight { + Self::get_subscriptions(num_publishers, keys_per_publisher) + .saturating_add(Self::collect_publisher_roots(num_publishers)) + .saturating_add(Self::process_published_data(num_publishers, keys_per_publisher)) + } } impl WeightInfo for () { - fn process_published_data() -> Weight { + fn get_subscriptions(_n: u32, _k: u32) -> Weight { + // TODO: Replace with proper benchmarked weights + Weight::from_parts(5_000, 0) + } + + fn collect_publisher_roots(_n: u32) -> Weight { // TODO: Replace with proper benchmarked weights Weight::from_parts(10_000, 0) } + fn process_published_data(_n: u32, _k: u32) -> Weight { + // TODO: Replace with proper benchmarked weights + Weight::from_parts(50_000, 0) + } + fn clear_stored_roots() -> Weight { frame_support::weights::constants::RocksDbWeight::get().reads_writes(1, 1) } From 6e26188d4dfa68c0ff4a123b5197120e6972e39d Mon Sep 17 00:00:00 2001 From: 
metricaez Date: Fri, 12 Dec 2025 20:26:46 -0300 Subject: [PATCH 36/69] chore: pubsubConsumer cleanup --- cumulus/pallets/pubsub-consumer/src/lib.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cumulus/pallets/pubsub-consumer/src/lib.rs b/cumulus/pallets/pubsub-consumer/src/lib.rs index 8d917119c1131..89877ae9b50c7 100644 --- a/cumulus/pallets/pubsub-consumer/src/lib.rs +++ b/cumulus/pallets/pubsub-consumer/src/lib.rs @@ -52,13 +52,12 @@ impl cumulus_pallet_subscriber::SubscriptionHandler for TestSubscript #[frame_support::pallet] pub mod pallet { use super::*; - use frame_system::pallet_prelude::*; #[pallet::pallet] pub struct Pallet(_); #[pallet::config] - pub trait Config: frame_system::Config>> {} + pub trait Config: frame_system::Config {} #[pallet::storage] pub type ReceivedData = StorageDoubleMap< From c1e05c1b03f319ebce605249d96bcbf92d4cd288 Mon Sep 17 00:00:00 2001 From: metricaez Date: Sat, 13 Dec 2025 15:31:06 -0300 Subject: [PATCH 37/69] feat: keys into hashes --- .../src/broadcaster/benchmarking.rs | 6 +- .../runtime/parachains/src/broadcaster/mod.rs | 49 ++----- .../parachains/src/broadcaster/tests.rs | 132 ++++++++---------- .../parachains/src/broadcaster/traits.rs | 5 +- polkadot/runtime/parachains/src/mock.rs | 2 - polkadot/runtime/rococo/src/lib.rs | 2 - .../src/generic/benchmarking.rs | 11 +- polkadot/xcm/src/v5/mod.rs | 8 +- .../xcm/xcm-builder/src/broadcast_adapter.rs | 25 ++-- polkadot/xcm/xcm-builder/src/test_utils.rs | 6 +- polkadot/xcm/xcm-builder/src/tests/publish.rs | 27 ++-- 11 files changed, 126 insertions(+), 147 deletions(-) diff --git a/polkadot/runtime/parachains/src/broadcaster/benchmarking.rs b/polkadot/runtime/parachains/src/broadcaster/benchmarking.rs index ec852d9db81ff..899f34f87864b 100644 --- a/polkadot/runtime/parachains/src/broadcaster/benchmarking.rs +++ b/polkadot/runtime/parachains/src/broadcaster/benchmarking.rs @@ -20,6 +20,7 @@ use super::{Pallet as Broadcaster, *}; use 
frame_benchmarking::v2::*; use frame_support::traits::fungible::{Inspect as FunInspect, Mutate}; use polkadot_primitives::Id as ParaId; +use sp_core::hashing::blake2_256; type BalanceOf = <::Currency as FunInspect<::AccountId>>::Balance; @@ -71,8 +72,9 @@ mod benchmarks { let batch_end = (batch_start + max_items).min(k); let mut data = Vec::new(); for i in batch_start..batch_end { - let mut key = b"key_".to_vec(); - key.extend_from_slice(&i.to_be_bytes()); + let mut key_data = b"key_".to_vec(); + key_data.extend_from_slice(&i.to_be_bytes()); + let key = blake2_256(&key_data); data.push((key, b"value".to_vec())); } Broadcaster::::handle_publish(para_id, data).unwrap(); diff --git a/polkadot/runtime/parachains/src/broadcaster/mod.rs b/polkadot/runtime/parachains/src/broadcaster/mod.rs index 7f1ad095893a9..f971cbbdfaeba 100644 --- a/polkadot/runtime/parachains/src/broadcaster/mod.rs +++ b/polkadot/runtime/parachains/src/broadcaster/mod.rs @@ -130,12 +130,6 @@ pub mod pallet { #[pallet::constant] type MaxPublishItems: Get; - /// Maximum length of a published key in bytes. - /// - /// Must not exceed `xcm::v5::MaxPublishKeyLength`. - #[pallet::constant] - type MaxKeyLength: Get; - /// Maximum length of a published value in bytes. /// /// Must not exceed `xcm::v5::MaxPublishValueLength`. @@ -202,7 +196,7 @@ pub mod pallet { _, Twox64Concat, ParaId, - BoundedBTreeSet, T::MaxStoredKeys>, + BoundedBTreeSet<[u8; 32], T::MaxStoredKeys>, ValueQuery, >; @@ -211,8 +205,6 @@ pub mod pallet { pub enum Error { /// Too many items in a single publish operation. TooManyPublishItems, - /// Key length exceeds maximum allowed. - KeyTooLong, /// Value length exceeds maximum allowed. ValueTooLong, /// Too many unique keys stored for this publisher. 
@@ -240,10 +232,6 @@ pub mod pallet { T::MaxPublishItems::get() <= xcm::v5::MaxPublishItems::get(), "Broadcaster MaxPublishItems exceeds XCM MaxPublishItems upper bound" ); - assert!( - T::MaxKeyLength::get() <= xcm::v5::MaxPublishKeyLength::get(), - "Broadcaster MaxKeyLength exceeds XCM MaxPublishKeyLength upper bound" - ); assert!( T::MaxValueLength::get() <= xcm::v5::MaxPublishValueLength::get(), "Broadcaster MaxValueLength exceeds XCM MaxPublishValueLength upper bound" @@ -458,9 +446,8 @@ pub mod pallet { let published_keys = PublishedKeys::::get(para_id); // Remove all key-value pairs from the child trie - for bounded_key in published_keys.iter() { - let key: Vec = bounded_key.clone().into(); - frame_support::storage::child::kill(&child_info, &key); + for key in published_keys.iter() { + frame_support::storage::child::kill(&child_info, key); } // Clean up tracking storage @@ -539,9 +526,11 @@ pub mod pallet { /// Validates the publisher is registered, checks all bounds, and stores the provided /// key-value pairs in the publisher's dedicated child trie. Updates the child trie root /// and published keys tracking. + /// + /// Keys must be 32-byte hashes. 
pub fn handle_publish( origin_para_id: ParaId, - data: Vec<(Vec, Vec)>, + data: Vec<([u8; 32], Vec)>, ) -> DispatchResult { // Check publisher is registered ensure!( @@ -557,12 +546,8 @@ pub mod pallet { Error::::TooManyPublishItems ); - // Validate all keys and values before creating publisher entry - for (key, value) in &data { - ensure!( - key.len() <= T::MaxKeyLength::get() as usize, - Error::::KeyTooLong - ); + // Validate all values before creating publisher entry + for (_key, value) in &data { ensure!( value.len() <= T::MaxValueLength::get() as usize, Error::::ValueTooLong @@ -574,10 +559,8 @@ pub mod pallet { // Count new unique keys to prevent exceeding MaxStoredKeys let mut new_keys_count = 0u32; for (key, _) in &data { - if let Ok(bounded_key) = BoundedVec::try_from(key.clone()) { - if !published_keys.contains(&bounded_key) { - new_keys_count += 1; - } + if !published_keys.contains(key) { + new_keys_count += 1; } } @@ -593,10 +576,7 @@ pub mod pallet { // Write to child trie and track keys for enumeration for (key, value) in data { frame_support::storage::child::put(&child_info, &key, &value); - - if let Ok(bounded_key) = BoundedVec::try_from(key) { - published_keys.try_insert(bounded_key).defensive_ok(); - } + published_keys.try_insert(key).defensive_ok(); } PublishedKeys::::insert(origin_para_id, published_keys); @@ -648,7 +628,7 @@ pub mod pallet { /// /// Iterates over all tracked keys for the publisher and retrieves their values from the /// child trie. 
- pub fn get_all_published_data(para_id: ParaId) -> Vec<(Vec, Vec)> { + pub fn get_all_published_data(para_id: ParaId) -> Vec<([u8; 32], Vec)> { if !PublisherExists::::get(para_id) { return Vec::new(); } @@ -658,8 +638,7 @@ pub mod pallet { published_keys .into_iter() - .filter_map(|bounded_key| { - let key: Vec = bounded_key.into(); + .filter_map(|key| { frame_support::storage::child::get(&child_info, &key) .map(|value| (key, value)) }) @@ -675,7 +654,7 @@ pub mod pallet { // Implement Publish trait impl Publish for Pallet { - fn publish_data(publisher: ParaId, data: Vec<(Vec, Vec)>) -> DispatchResult { + fn publish_data(publisher: ParaId, data: Vec<([u8; 32], Vec)>) -> DispatchResult { Self::handle_publish(publisher, data) } } diff --git a/polkadot/runtime/parachains/src/broadcaster/tests.rs b/polkadot/runtime/parachains/src/broadcaster/tests.rs index f9df57d650f27..0938759d666dc 100644 --- a/polkadot/runtime/parachains/src/broadcaster/tests.rs +++ b/polkadot/runtime/parachains/src/broadcaster/tests.rs @@ -21,6 +21,7 @@ use frame_support::{ traits::fungible::{hold::Inspect as HoldInspect, Inspect}, }; use polkadot_primitives::Id as ParaId; +use sp_core::hashing::blake2_256; const ALICE: u64 = 1; const BOB: u64 = 2; @@ -29,6 +30,11 @@ fn setup_account(who: u64, balance: u128) { let _ = Balances::mint_into(&who, balance); } +// Helper to create hash keys from strings for tests +fn hash_key(data: &[u8]) -> [u8; 32] { + blake2_256(data) +} + fn register_test_publisher(para_id: ParaId) { setup_account(ALICE, 10000); assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_id)); @@ -151,7 +157,7 @@ fn register_publisher_requires_sufficient_balance() { fn publish_requires_registration() { new_test_ext(Default::default()).execute_with(|| { let para_id = ParaId::from(2000); - let data = vec![(b"key".to_vec(), b"value".to_vec())]; + let data = vec![(hash_key(b"key"), b"value".to_vec())]; assert_err!( Broadcaster::handle_publish(para_id, data), @@ 
-170,10 +176,10 @@ fn registered_publisher_can_publish() { assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_id)); - let data = vec![(b"key".to_vec(), b"value".to_vec())]; + let data = vec![(hash_key(b"key"), b"value".to_vec())]; assert_ok!(Broadcaster::handle_publish(para_id, data)); - assert_eq!(Broadcaster::get_published_value(para_id, b"key"), Some(b"value".to_vec())); + assert_eq!(Broadcaster::get_published_value(para_id, &hash_key(b"key")), Some(b"value".to_vec())); }); } @@ -189,7 +195,7 @@ fn publish_store_retrieve_and_update_data() { assert!(Broadcaster::get_publisher_child_root(para_id).is_none()); let initial_data = - vec![(b"key1".to_vec(), b"value1".to_vec()), (b"key2".to_vec(), b"value2".to_vec())]; + vec![(hash_key(b"key1"), b"value1".to_vec()), (hash_key(b"key2"), b"value2".to_vec())]; Broadcaster::handle_publish(para_id, initial_data.clone()).unwrap(); assert!(PublisherExists::::get(para_id)); @@ -197,13 +203,13 @@ fn publish_store_retrieve_and_update_data() { assert!(root_after_initial.is_some()); assert!(!root_after_initial.as_ref().unwrap().is_empty()); - assert_eq!(Broadcaster::get_published_value(para_id, b"key1"), Some(b"value1".to_vec())); - assert_eq!(Broadcaster::get_published_value(para_id, b"key2"), Some(b"value2".to_vec())); - assert_eq!(Broadcaster::get_published_value(para_id, b"key3"), None); + assert_eq!(Broadcaster::get_published_value(para_id, &hash_key(b"key1")), Some(b"value1".to_vec())); + assert_eq!(Broadcaster::get_published_value(para_id, &hash_key(b"key2")), Some(b"value2".to_vec())); + assert_eq!(Broadcaster::get_published_value(para_id, &hash_key(b"key3")), None); let update_data = vec![ - (b"key1".to_vec(), b"updated_value1".to_vec()), - (b"key3".to_vec(), b"value3".to_vec()), + (hash_key(b"key1"), b"updated_value1".to_vec()), + (hash_key(b"key3"), b"value3".to_vec()), ]; Broadcaster::handle_publish(para_id, update_data).unwrap(); @@ -212,14 +218,14 @@ fn 
publish_store_retrieve_and_update_data() { assert_ne!(root_after_initial.unwrap(), root_after_update.unwrap()); assert_eq!( - Broadcaster::get_published_value(para_id, b"key1"), + Broadcaster::get_published_value(para_id, &hash_key(b"key1")), Some(b"updated_value1".to_vec()) ); assert_eq!( - Broadcaster::get_published_value(para_id, b"key2"), + Broadcaster::get_published_value(para_id, &hash_key(b"key2")), Some(b"value2".to_vec()) // Should remain unchanged ); - assert_eq!(Broadcaster::get_published_value(para_id, b"key3"), Some(b"value3".to_vec())); + assert_eq!(Broadcaster::get_published_value(para_id, &hash_key(b"key3")), Some(b"value3".to_vec())); }); } @@ -243,7 +249,7 @@ fn handle_publish_respects_max_items_limit() { let mut data = Vec::new(); for i in 0..17 { - data.push((format!("key{}", i).into_bytes(), b"value".to_vec())); + data.push((hash_key(&format!("key{}", i).into_bytes()), b"value".to_vec())); } let result = Broadcaster::handle_publish(para_id, data); @@ -251,20 +257,6 @@ fn handle_publish_respects_max_items_limit() { }); } -#[test] -fn handle_publish_respects_key_length_limit() { - new_test_ext(Default::default()).execute_with(|| { - let para_id = ParaId::from(2000); - register_test_publisher(para_id); - - let long_key = vec![b'a'; 257]; - let data = vec![(long_key, b"value".to_vec())]; - - let result = Broadcaster::handle_publish(para_id, data); - assert!(result.is_err()); - }); -} - #[test] fn handle_publish_respects_value_length_limit() { new_test_ext(Default::default()).execute_with(|| { @@ -272,7 +264,7 @@ fn handle_publish_respects_value_length_limit() { register_test_publisher(para_id); let long_value = vec![b'v'; 1025]; - let data = vec![(b"key".to_vec(), long_value)]; + let data = vec![(hash_key(b"key"), long_value)]; let result = Broadcaster::handle_publish(para_id, data); assert!(result.is_err()); @@ -290,7 +282,7 @@ fn max_stored_keys_limit_enforced() { for i in 0..16 { let key_num = batch * 16 + i; if key_num < 100 { - 
data.push((format!("key{}", key_num).into_bytes(), b"value".to_vec())); + data.push((hash_key(&format!("key{}", key_num).into_bytes()), b"value".to_vec())); } } if !data.is_empty() { @@ -302,17 +294,17 @@ fn max_stored_keys_limit_enforced() { assert_eq!(published_keys.len(), 100); let result = - Broadcaster::handle_publish(para_id, vec![(b"new_key".to_vec(), b"value".to_vec())]); + Broadcaster::handle_publish(para_id, vec![(hash_key(b"new_key"), b"value".to_vec())]); assert_err!(result, Error::::TooManyStoredKeys); let result = Broadcaster::handle_publish( para_id, - vec![(b"key0".to_vec(), b"updated_value".to_vec())], + vec![(hash_key(b"key0"), b"updated_value".to_vec())], ); assert_ok!(result); assert_eq!( - Broadcaster::get_published_value(para_id, b"key0"), + Broadcaster::get_published_value(para_id, &hash_key(b"key0")), Some(b"updated_value".to_vec()) ); }); @@ -326,15 +318,15 @@ fn published_keys_storage_matches_child_trie() { // Publish multiple batches to ensure consistency maintained across updates let data1 = vec![ - (b"key1".to_vec(), b"value1".to_vec()), - (b"key2".to_vec(), b"value2".to_vec()), + (hash_key(b"key1"), b"value1".to_vec()), + (hash_key(b"key2"), b"value2".to_vec()), ]; Broadcaster::handle_publish(para_id, data1).unwrap(); // Update some keys, add new ones let data2 = vec![ - (b"key1".to_vec(), b"updated_value1".to_vec()), - (b"key3".to_vec(), b"value3".to_vec()), + (hash_key(b"key1"), b"updated_value1".to_vec()), + (hash_key(b"key3"), b"value3".to_vec()), ]; Broadcaster::handle_publish(para_id, data2).unwrap(); @@ -346,16 +338,12 @@ fn published_keys_storage_matches_child_trie() { // Every tracked key must exist in child trie for tracked_key in tracked_keys.iter() { - let key: Vec = tracked_key.clone().into(); - assert!(actual_data.iter().any(|(k, _)| k == &key)); + assert!(actual_data.iter().any(|(k, _)| k == tracked_key)); } // Every child trie key must be tracked for (actual_key, _) in actual_data.iter() { - 
assert!(tracked_keys.iter().any(|tracked| { - let k: Vec = tracked.clone().into(); - &k == actual_key - })); + assert!(tracked_keys.contains(actual_key)); } }); } @@ -375,9 +363,9 @@ fn multiple_publishers_in_same_block() { assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(3), para3)); // Multiple parachains publish data in the same block - let data1 = vec![(b"key1".to_vec(), b"value1".to_vec())]; - let data2 = vec![(b"key2".to_vec(), b"value2".to_vec())]; - let data3 = vec![(b"key3".to_vec(), b"value3".to_vec())]; + let data1 = vec![(hash_key(b"key1"), b"value1".to_vec())]; + let data2 = vec![(hash_key(b"key2"), b"value2".to_vec())]; + let data3 = vec![(hash_key(b"key3"), b"value3".to_vec())]; Broadcaster::handle_publish(para1, data1).unwrap(); Broadcaster::handle_publish(para2, data2).unwrap(); @@ -389,14 +377,14 @@ fn multiple_publishers_in_same_block() { assert!(PublisherExists::::get(para3)); // Verify each para's data is independently accessible - assert_eq!(Broadcaster::get_published_value(para1, b"key1"), Some(b"value1".to_vec())); - assert_eq!(Broadcaster::get_published_value(para2, b"key2"), Some(b"value2".to_vec())); - assert_eq!(Broadcaster::get_published_value(para3, b"key3"), Some(b"value3".to_vec())); + assert_eq!(Broadcaster::get_published_value(para1, &hash_key(b"key1")), Some(b"value1".to_vec())); + assert_eq!(Broadcaster::get_published_value(para2, &hash_key(b"key2")), Some(b"value2".to_vec())); + assert_eq!(Broadcaster::get_published_value(para3, &hash_key(b"key3")), Some(b"value3".to_vec())); // Verify no cross-contamination - assert_eq!(Broadcaster::get_published_value(para1, b"key2"), None); - assert_eq!(Broadcaster::get_published_value(para2, b"key3"), None); - assert_eq!(Broadcaster::get_published_value(para3, b"key1"), None); + assert_eq!(Broadcaster::get_published_value(para1, &hash_key(b"key2")), None); + assert_eq!(Broadcaster::get_published_value(para2, &hash_key(b"key3")), None); + 
assert_eq!(Broadcaster::get_published_value(para3, &hash_key(b"key1")), None); }); } @@ -411,7 +399,7 @@ fn max_publishers_limit_enforced() { RuntimeOrigin::signed(100 + i as u64), para_id )); - let data = vec![(b"key".to_vec(), b"value".to_vec())]; + let data = vec![(hash_key(b"key"), b"value".to_vec())]; assert_ok!(Broadcaster::handle_publish(para_id, data)); } @@ -429,10 +417,10 @@ fn max_publishers_limit_enforced() { // Existing publisher can still update let existing_para = ParaId::from(2000); - let update_data = vec![(b"key".to_vec(), b"updated".to_vec())]; + let update_data = vec![(hash_key(b"key"), b"updated".to_vec())]; assert_ok!(Broadcaster::handle_publish(existing_para, update_data)); assert_eq!( - Broadcaster::get_published_value(existing_para, b"key"), + Broadcaster::get_published_value(existing_para, &hash_key(b"key")), Some(b"updated".to_vec()) ); }); @@ -446,8 +434,8 @@ fn cleanup_published_data_works() { assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_id)); let data = vec![ - (b"key1".to_vec(), b"value1".to_vec()), - (b"key2".to_vec(), b"value2".to_vec()), + (hash_key(b"key1"), b"value1".to_vec()), + (hash_key(b"key2"), b"value2".to_vec()), ]; assert_ok!(Broadcaster::handle_publish(para_id, data)); @@ -458,8 +446,8 @@ fn cleanup_published_data_works() { assert!(!PublisherExists::::get(para_id)); assert_eq!(PublishedKeys::::get(para_id).len(), 0); - assert_eq!(Broadcaster::get_published_value(para_id, b"key1"), None); - assert_eq!(Broadcaster::get_published_value(para_id, b"key2"), None); + assert_eq!(Broadcaster::get_published_value(para_id, &hash_key(b"key1")), None); + assert_eq!(Broadcaster::get_published_value(para_id, &hash_key(b"key2")), None); assert!(RegisteredPublishers::::get(para_id).is_some()); }); } @@ -472,7 +460,7 @@ fn cleanup_requires_manager() { setup_account(BOB, 10000); assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_id)); - 
assert_ok!(Broadcaster::handle_publish(para_id, vec![(b"key".to_vec(), b"value".to_vec())])); + assert_ok!(Broadcaster::handle_publish(para_id, vec![(hash_key(b"key"), b"value".to_vec())])); assert_err!( Broadcaster::cleanup_published_data(RuntimeOrigin::signed(BOB), para_id), @@ -537,7 +525,7 @@ fn deregister_fails_if_data_exists() { setup_account(ALICE, 10000); assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_id)); - assert_ok!(Broadcaster::handle_publish(para_id, vec![(b"key".to_vec(), b"value".to_vec())])); + assert_ok!(Broadcaster::handle_publish(para_id, vec![(hash_key(b"key"), b"value".to_vec())])); assert_err!( Broadcaster::deregister_publisher(RuntimeOrigin::signed(ALICE), para_id), @@ -572,9 +560,9 @@ fn two_phase_cleanup_and_deregister_works() { assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_id)); let data = vec![ - (b"key1".to_vec(), b"value1".to_vec()), - (b"key2".to_vec(), b"value2".to_vec()), - (b"key3".to_vec(), b"value3".to_vec()), + (hash_key(b"key1"), b"value1".to_vec()), + (hash_key(b"key2"), b"value2".to_vec()), + (hash_key(b"key3"), b"value3".to_vec()), ]; assert_ok!(Broadcaster::handle_publish(para_id, data)); @@ -599,8 +587,8 @@ fn force_deregister_works() { assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_id)); let data = vec![ - (b"key1".to_vec(), b"value1".to_vec()), - (b"key2".to_vec(), b"value2".to_vec()), + (hash_key(b"key1"), b"value1".to_vec()), + (hash_key(b"key2"), b"value2".to_vec()), ]; assert_ok!(Broadcaster::handle_publish(para_id, data)); @@ -636,7 +624,7 @@ fn force_deregister_requires_root() { setup_account(ALICE, 10000); assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_id)); - assert_ok!(Broadcaster::handle_publish(para_id, vec![(b"key".to_vec(), b"value".to_vec())])); + assert_ok!(Broadcaster::handle_publish(para_id, vec![(hash_key(b"key"), b"value".to_vec())])); assert_err!( 
Broadcaster::force_deregister_publisher(RuntimeOrigin::signed(ALICE), para_id), @@ -661,7 +649,7 @@ fn cleanup_removes_all_keys_from_child_trie() { let mut data = Vec::new(); for i in 0..10 { let key = format!("key_{}_{}", batch, i); - data.push((key.as_bytes().to_vec(), b"value".to_vec())); + data.push((hash_key(key.as_bytes()), b"value".to_vec())); } assert_ok!(Broadcaster::handle_publish(para_id, data)); } @@ -673,7 +661,7 @@ fn cleanup_removes_all_keys_from_child_trie() { for batch in 0..5 { for i in 0..10 { let key = format!("key_{}_{}", batch, i); - assert_eq!(Broadcaster::get_published_value(para_id, key.as_bytes()), None); + assert_eq!(Broadcaster::get_published_value(para_id, &hash_key(key.as_bytes())), None); } } @@ -694,7 +682,7 @@ fn force_deregister_with_zero_deposit() { para_id )); - assert_ok!(Broadcaster::handle_publish(para_id, vec![(b"key".to_vec(), b"value".to_vec())])); + assert_ok!(Broadcaster::handle_publish(para_id, vec![(hash_key(b"key"), b"value".to_vec())])); assert_ok!(Broadcaster::force_deregister_publisher(RuntimeOrigin::root(), para_id)); @@ -717,9 +705,9 @@ fn cleanup_outgoing_publishers_works() { assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_b)); assert_ok!(Broadcaster::register_publisher(RuntimeOrigin::signed(ALICE), para_c)); - assert_ok!(Broadcaster::handle_publish(para_a, vec![(b"key1".to_vec(), b"value1".to_vec())])); - assert_ok!(Broadcaster::handle_publish(para_b, vec![(b"key2".to_vec(), b"value2".to_vec())])); - assert_ok!(Broadcaster::handle_publish(para_c, vec![(b"key3".to_vec(), b"value3".to_vec())])); + assert_ok!(Broadcaster::handle_publish(para_a, vec![(hash_key(b"key1"), b"value1".to_vec())])); + assert_ok!(Broadcaster::handle_publish(para_b, vec![(hash_key(b"key2"), b"value2".to_vec())])); + assert_ok!(Broadcaster::handle_publish(para_c, vec![(hash_key(b"key3"), b"value3".to_vec())])); let notification = crate::initializer::SessionChangeNotification::default(); let outgoing_paras = 
vec![para_a, para_b]; diff --git a/polkadot/runtime/parachains/src/broadcaster/traits.rs b/polkadot/runtime/parachains/src/broadcaster/traits.rs index 6d08887ead1a7..3f8046894f034 100644 --- a/polkadot/runtime/parachains/src/broadcaster/traits.rs +++ b/polkadot/runtime/parachains/src/broadcaster/traits.rs @@ -23,7 +23,10 @@ use sp_runtime::DispatchResult; /// Trait for handling publish operations for parachains. /// /// This trait provides the interface for parachains to publish key-value data. +/// Keys must be 32-byte hashes. pub trait Publish { /// Publish key-value data for a specific parachain. - fn publish_data(publisher: ParaId, data: Vec<(Vec, Vec)>) -> DispatchResult; + /// + /// Keys must be 32-byte hashes. + fn publish_data(publisher: ParaId, data: Vec<([u8; 32], Vec)>) -> DispatchResult; } diff --git a/polkadot/runtime/parachains/src/mock.rs b/polkadot/runtime/parachains/src/mock.rs index 3c66fb3c9fb01..6877fb3412516 100644 --- a/polkadot/runtime/parachains/src/mock.rs +++ b/polkadot/runtime/parachains/src/mock.rs @@ -219,7 +219,6 @@ impl crate::shared::Config for Test { parameter_types! { pub const MaxPublishItems: u32 = 16; - pub const MaxKeyLength: u32 = 32; pub const MaxValueLength: u32 = 1024; pub const MaxStoredKeys: u32 = 100; pub const MaxPublishers: u32 = 1000; @@ -234,7 +233,6 @@ impl crate::broadcaster::Config for Test { type RuntimeHoldReason = RuntimeHoldReason; type WeightInfo = (); type MaxPublishItems = MaxPublishItems; - type MaxKeyLength = MaxKeyLength; type MaxValueLength = MaxValueLength; type MaxStoredKeys = MaxStoredKeys; type MaxPublishers = MaxPublishers; diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 9ad62af1be23b..dcba3758906b0 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -1228,7 +1228,6 @@ impl parachains_slashing::Config for Runtime { parameter_types! 
{ pub const MaxPublishItems: u32 = 10; - pub const MaxKeyLength: u32 = 32; pub const MaxValueLength: u32 = 1024; pub const MaxStoredKeys: u32 = 100; pub const MaxPublishers: u32 = 1000; @@ -1240,7 +1239,6 @@ impl parachains_broadcaster::Config for Runtime { type RuntimeHoldReason = RuntimeHoldReason; type WeightInfo = (); type MaxPublishItems = MaxPublishItems; - type MaxKeyLength = MaxKeyLength; type MaxValueLength = MaxValueLength; type MaxStoredKeys = MaxStoredKeys; type MaxPublishers = MaxPublishers; diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs index 199543f4efa76..ec2c21bcff56e 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs @@ -963,19 +963,20 @@ mod benchmarks { #[benchmark] fn publish(n: Linear<1, { MaxPublishItems::get() }>) -> Result<(), BenchmarkError> { - use xcm::latest::{MaxPublishKeyLength, MaxPublishValueLength}; + use xcm::latest::MaxPublishValueLength; // The `Publish` instruction weight scales with the number of items published. - // Each item is benchmarked at maximum key and value lengths to represent worst-case + // Each item is benchmarked at maximum value length to represent worst-case // storage operations. The actual weight formula will be `base_weight + n * per_item_weight`. 
- let max_key_len = MaxPublishKeyLength::get() as usize; let max_value_len = MaxPublishValueLength::get() as usize; - // Create publish data: n items, each with maximum key and value length + // Create publish data: n items, each with a unique hash key and maximum value length let data_vec: Vec<_> = (0..n) .map(|i| { + let mut key = [0u8; 32]; + key[0] = i as u8; ( - BoundedVec::try_from(vec![i as u8; max_key_len]).unwrap(), + key, BoundedVec::try_from(vec![i as u8; max_value_len]).unwrap(), ) }) diff --git a/polkadot/xcm/src/v5/mod.rs b/polkadot/xcm/src/v5/mod.rs index d7f6e6984e1bf..d1cd609b420a7 100644 --- a/polkadot/xcm/src/v5/mod.rs +++ b/polkadot/xcm/src/v5/mod.rs @@ -188,7 +188,7 @@ pub mod prelude { Junctions::{self, Here}, Location, MaxAssetTransferFilters, MaxPublishItems, MaybeErrorCode, NetworkId::{self, *}, - PublishData, + PublishData, PublishKey, OriginKind, Outcome, PalletInfo, Parent, ParentThen, PreparedMessage, QueryId, QueryResponseInfo, Reanchorable, Response, Result as XcmResult, SendError, SendResult, SendXcm, Weight, @@ -213,12 +213,14 @@ parameter_types! 
{ pub MaxPalletsInfo: u32 = 64; pub MaxAssetTransferFilters: u32 = 6; pub MaxPublishItems: u32 = 16; - pub MaxPublishKeyLength: u32 = 32; pub MaxPublishValueLength: u32 = 1024; } +/// Key type for published data - a 32-byte hash +pub type PublishKey = [u8; 32]; + pub type PublishData = BoundedVec< - (BoundedVec, BoundedVec), + (PublishKey, BoundedVec), MaxPublishItems, >; diff --git a/polkadot/xcm/xcm-builder/src/broadcast_adapter.rs b/polkadot/xcm/xcm-builder/src/broadcast_adapter.rs index 404159c7e36b7..4e2c4d50397a6 100644 --- a/polkadot/xcm/xcm-builder/src/broadcast_adapter.rs +++ b/polkadot/xcm/xcm-builder/src/broadcast_adapter.rs @@ -47,10 +47,10 @@ where }; // Call the actual handler - let data_vec: Vec<(Vec, Vec)> = data + let data_vec: Vec<([u8; 32], Vec)> = data .into_inner() .into_iter() - .map(|(k, v)| (k.into_inner(), v.into_inner())) + .map(|(k, v)| (k, v.into_inner())) .collect(); Handler::publish_data(para_id, data_vec).map_err(|_| XcmError::PublishFailed) } @@ -72,21 +72,21 @@ mod tests { use sp_runtime::BoundedVec; use xcm::latest::prelude::XcmError; use xcm::latest::{ - Junction, Location, MaxPublishKeyLength, MaxPublishValueLength, PublishData, + Junction, Location, MaxPublishValueLength, PublishData, PublishKey, }; // Mock handler that tracks calls parameter_types! 
{ - pub static PublishCalls: Vec<(ParaId, Vec<(Vec, Vec)>)> = vec![]; + pub static PublishCalls: Vec<(ParaId, Vec<(PublishKey, Vec)>)> = vec![]; } // Helper to create test publish data - fn test_publish_data(items: Vec<(&[u8], &[u8])>) -> PublishData { + fn test_publish_data(items: Vec<([u8; 32], &[u8])>) -> PublishData { items .into_iter() .map(|(k, v)| { ( - BoundedVec::::try_from(k.to_vec()).unwrap(), + k, BoundedVec::::try_from(v.to_vec()).unwrap(), ) }) @@ -99,7 +99,7 @@ mod tests { impl Publish for MockPublishHandler { fn publish_data( publisher: ParaId, - data: Vec<(Vec, Vec)>, + data: Vec<([u8; 32], Vec)>, ) -> Result<(), sp_runtime::DispatchError> { let mut calls = PublishCalls::get(); calls.push((publisher, data)); @@ -112,7 +112,8 @@ mod tests { fn publish_from_direct_parachain_works() { PublishCalls::set(vec![]); let origin = Location::new(0, [Junction::Parachain(1000)]); - let data = test_publish_data(vec![(b"key1", b"value1")]); + let key1 = [1u8; 32]; + let data = test_publish_data(vec![(key1, b"value1")]); let result = ParachainBroadcastAdapter::::handle_publish( &origin, @@ -123,7 +124,7 @@ mod tests { let calls = PublishCalls::get(); assert_eq!(calls.len(), 1); assert_eq!(calls[0].0, ParaId::from(1000)); - assert_eq!(calls[0].1, vec![(b"key1".to_vec(), b"value1".to_vec())]); + assert_eq!(calls[0].1, vec![(key1, b"value1".to_vec())]); } #[test] @@ -133,7 +134,8 @@ mod tests { 1, [Junction::Parachain(2000), Junction::AccountId32 { network: None, id: [1; 32] }], ); - let data = test_publish_data(vec![(b"key1", b"value1")]); + let key1 = [2u8; 32]; + let data = test_publish_data(vec![(key1, b"value1")]); let result = ParachainBroadcastAdapter::::handle_publish( &origin, @@ -148,7 +150,8 @@ mod tests { fn publish_from_non_parachain_fails() { PublishCalls::set(vec![]); let origin = Location::here(); - let data = test_publish_data(vec![(b"key1", b"value1")]); + let key1 = [3u8; 32]; + let data = test_publish_data(vec![(key1, b"value1")]); let result = 
ParachainBroadcastAdapter::::handle_publish( diff --git a/polkadot/xcm/xcm-builder/src/test_utils.rs b/polkadot/xcm/xcm-builder/src/test_utils.rs index 26e6938a27c05..ec5b78b8dc2ec 100644 --- a/polkadot/xcm/xcm-builder/src/test_utils.rs +++ b/polkadot/xcm/xcm-builder/src/test_utils.rs @@ -34,7 +34,7 @@ parameter_types! { pub static SubscriptionRequests: Vec<(Location, Option<(QueryId, Weight)>)> = vec![]; pub static MaxAssetsIntoHolding: u32 = 4; // Maps ParaId => Vec<(key, value)> - pub static PublishedData: BTreeMap, Vec)>> = BTreeMap::new(); + pub static PublishedData: BTreeMap)>> = BTreeMap::new(); } pub struct TestSubscriptionService; @@ -76,10 +76,10 @@ impl BroadcastHandler for TestBroadcastHandler { }; let mut published = PublishedData::get(); - let data_vec: Vec<(Vec, Vec)> = data + let data_vec: Vec<([u8; 32], Vec)> = data .into_inner() .into_iter() - .map(|(k, v)| (k.into_inner(), v.into_inner())) + .map(|(k, v)| (k, v.into_inner())) .collect(); // Merge with existing data for this parachain diff --git a/polkadot/xcm/xcm-builder/src/tests/publish.rs b/polkadot/xcm/xcm-builder/src/tests/publish.rs index 6ac7369c555e8..3d0933a6f3f03 100644 --- a/polkadot/xcm/xcm-builder/src/tests/publish.rs +++ b/polkadot/xcm/xcm-builder/src/tests/publish.rs @@ -19,15 +19,15 @@ use super::*; use crate::test_utils::PublishedData; use sp_runtime::BoundedVec; -use xcm::latest::{MaxPublishKeyLength, MaxPublishValueLength}; +use xcm::latest::{MaxPublishValueLength, PublishKey}; // Helper to create test publish data -fn test_publish_data(items: Vec<(&[u8], &[u8])>) -> PublishData { +fn test_publish_data(items: Vec<(PublishKey, &[u8])>) -> PublishData { items .into_iter() .map(|(k, v)| { ( - BoundedVec::::try_from(k.to_vec()).unwrap(), + k, BoundedVec::::try_from(v.to_vec()).unwrap(), ) }) @@ -41,7 +41,8 @@ fn publish_from_parachain_works() { // Allow unpaid execution from Parachain(1000) AllowUnpaidFrom::set(vec![Parachain(1000).into()]); - let data = 
test_publish_data(vec![(b"key1", b"value1")]); + let key1 = [1u8; 32]; + let data = test_publish_data(vec![(key1, b"value1")]); let message = Xcm::(vec![Publish { data: data.clone() }]); let mut hash = fake_message_hash(&message); @@ -60,7 +61,7 @@ fn publish_from_parachain_works() { // Verify data was published let published = PublishedData::get(); assert_eq!(published.get(&1000).unwrap().len(), 1); - assert_eq!(published.get(&1000).unwrap()[0], (b"key1".to_vec(), b"value1".to_vec())); + assert_eq!(published.get(&1000).unwrap()[0], (key1, b"value1".to_vec())); } #[test] @@ -68,7 +69,8 @@ fn publish_from_non_parachain_fails() { // Allow unpaid execution from Parent to test that origin validation happens AllowUnpaidFrom::set(vec![Parent.into()]); - let data = test_publish_data(vec![(b"key1", b"value1")]); + let key1 = [2u8; 32]; + let data = test_publish_data(vec![(key1, b"value1")]); let message = Xcm::(vec![Publish { data }]); let mut hash = fake_message_hash(&message); @@ -97,7 +99,8 @@ fn publish_without_origin_fails() { // Allow unpaid execution from Parachain(1000) AllowUnpaidFrom::set(vec![Parachain(1000).into()]); - let data = test_publish_data(vec![(b"key1", b"value1")]); + let key1 = [4u8; 32]; + let data = test_publish_data(vec![(key1, b"value1")]); let message = Xcm::(vec![ClearOrigin, Publish { data }]); let mut hash = fake_message_hash(&message); @@ -125,9 +128,11 @@ fn publish_multiple_items_works() { // Allow unpaid execution from Parachain(1000) AllowUnpaidFrom::set(vec![Parachain(1000).into()]); + let key1 = [5u8; 32]; + let key2 = [6u8; 32]; let data = test_publish_data(vec![ - (b"key1", b"value1"), - (b"key2", b"value2"), + (key1, b"value1"), + (key2, b"value2"), ]); let message = Xcm::(vec![Publish { data: data.clone() }]); @@ -148,6 +153,6 @@ fn publish_multiple_items_works() { let published = PublishedData::get(); let para_data = published.get(&1000).unwrap(); assert_eq!(para_data.len(), 2); - assert!(para_data.contains(&(b"key1".to_vec(), 
b"value1".to_vec()))); - assert!(para_data.contains(&(b"key2".to_vec(), b"value2".to_vec()))); + assert!(para_data.contains(&(key1, b"value1".to_vec()))); + assert!(para_data.contains(&(key2, b"value2".to_vec()))); } From c7f338efd0f3c44c70638f1845e89e74879b766a Mon Sep 17 00:00:00 2001 From: metricaez Date: Sat, 13 Dec 2025 21:37:19 -0300 Subject: [PATCH 38/69] feat: enforce max storage per publisher --- cumulus/pallets/pubsub-consumer/src/lib.rs | 16 ++++- .../runtime/parachains/src/broadcaster/mod.rs | 60 +++++++++++++++++++ .../parachains/src/broadcaster/tests.rs | 37 ++++++++++-- polkadot/runtime/parachains/src/mock.rs | 4 +- polkadot/runtime/rococo/src/lib.rs | 2 + polkadot/xcm/src/v5/mod.rs | 4 +- 6 files changed, 116 insertions(+), 7 deletions(-) diff --git a/cumulus/pallets/pubsub-consumer/src/lib.rs b/cumulus/pallets/pubsub-consumer/src/lib.rs index 89877ae9b50c7..1a7990a2fdbf3 100644 --- a/cumulus/pallets/pubsub-consumer/src/lib.rs +++ b/cumulus/pallets/pubsub-consumer/src/lib.rs @@ -30,7 +30,21 @@ pub struct TestSubscriptionHandler(core::marker::PhantomData); impl cumulus_pallet_subscriber::SubscriptionHandler for TestSubscriptionHandler { fn subscriptions() -> Vec<(ParaId, Vec>)> { - alloc::vec![(ParaId::from(1000), alloc::vec![alloc::vec![0x12, 0x34]])] + // Subscribe to keys from publisher ParaId 1000 + let key1 = alloc::vec![ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + ]; + let key2 = alloc::vec![ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + ]; + + alloc::vec![(ParaId::from(1000), alloc::vec![key1, key2])] } fn on_data_updated(publisher: ParaId, key: Vec, value: Vec) { diff --git a/polkadot/runtime/parachains/src/broadcaster/mod.rs 
b/polkadot/runtime/parachains/src/broadcaster/mod.rs index f971cbbdfaeba..568b3cfef3e24 100644 --- a/polkadot/runtime/parachains/src/broadcaster/mod.rs +++ b/polkadot/runtime/parachains/src/broadcaster/mod.rs @@ -35,6 +35,13 @@ //! trie root is stored on-chain and can be included in storage proofs to verify //! published data. //! +//! Published data uses: +//! - Keys: 32-byte hashes (fixed size) +//! - Values: Bounded by `MaxValueLength` +//! - Total storage limit: `MaxTotalStorageSize` per publisher +//! +//! The total storage size is calculated as the sum of all (32-byte key + value length) pairs. +//! //! ## Storage Lifecycle //! //! Publishers can deregister to reclaim their deposit and remove their data: @@ -140,6 +147,13 @@ pub mod pallet { #[pallet::constant] type MaxStoredKeys: Get; + /// Maximum total storage size per publisher in bytes. + /// + /// This is the sum of all (32-byte key + value) pairs. + /// Typically set to ~2048 bytes (2 KiB) to limit storage overhead per publisher. + #[pallet::constant] + type MaxTotalStorageSize: Get; + /// Maximum number of parachains that can register as publishers. #[pallet::constant] type MaxPublishers: Get; @@ -200,6 +214,18 @@ pub mod pallet { ValueQuery, >; + /// Total storage size in bytes for each publisher. + /// + /// Calculated as the sum of all (32-byte key + value length) pairs. + #[pallet::storage] + pub type TotalStorageSize = StorageMap< + _, + Twox64Concat, + ParaId, + u32, + ValueQuery, + >; + #[pallet::error] pub enum Error { @@ -209,6 +235,8 @@ pub mod pallet { ValueTooLong, /// Too many unique keys stored for this publisher. TooManyStoredKeys, + /// Total storage size exceeds maximum allowed for this publisher. + TotalStorageSizeExceeded, /// Maximum number of publishers reached. TooManyPublishers, /// Para is not registered as a publisher. 
@@ -452,6 +480,7 @@ pub mod pallet { // Clean up tracking storage PublishedKeys::::remove(para_id); + TotalStorageSize::::remove(para_id); PublisherExists::::remove(para_id); Ok(()) @@ -555,6 +584,7 @@ pub mod pallet { } let mut published_keys = PublishedKeys::::get(origin_para_id); + let current_total_size = TotalStorageSize::::get(origin_para_id); // Count new unique keys to prevent exceeding MaxStoredKeys let mut new_keys_count = 0u32; @@ -570,6 +600,35 @@ pub mod pallet { Error::::TooManyStoredKeys ); + // Calculate storage delta: each item is 32 bytes (key) + value length + let child_info = Self::derive_child_info(origin_para_id); + let mut size_delta: i64 = 0; + + for (key, value) in &data { + let new_size = 32u32.saturating_add(value.len() as u32); + + // If key already exists, subtract old value size + if let Some(old_value) = frame_support::storage::child::get::>(&child_info, key) { + let old_size = 32u32.saturating_add(old_value.len() as u32); + size_delta = size_delta.saturating_add(new_size as i64).saturating_sub(old_size as i64); + } else { + size_delta = size_delta.saturating_add(new_size as i64); + } + } + + // Calculate new total size + let new_total_size = if size_delta >= 0 { + current_total_size.saturating_add(size_delta as u32) + } else { + current_total_size.saturating_sub((-size_delta) as u32) + }; + + // Ensure we don't exceed the total storage limit + ensure!( + new_total_size <= T::MaxTotalStorageSize::get(), + Error::::TotalStorageSizeExceeded + ); + // Get or create child trie for this publisher let child_info = Self::get_or_create_publisher_child_info(origin_para_id); @@ -580,6 +639,7 @@ pub mod pallet { } PublishedKeys::::insert(origin_para_id, published_keys); + TotalStorageSize::::insert(origin_para_id, new_total_size); Self::deposit_event(Event::DataPublished { publisher: origin_para_id, items_count }); diff --git a/polkadot/runtime/parachains/src/broadcaster/tests.rs b/polkadot/runtime/parachains/src/broadcaster/tests.rs index 
0938759d666dc..e448e480acab6 100644 --- a/polkadot/runtime/parachains/src/broadcaster/tests.rs +++ b/polkadot/runtime/parachains/src/broadcaster/tests.rs @@ -271,18 +271,47 @@ fn handle_publish_respects_value_length_limit() { }); } +#[test] +fn total_storage_size_limit_enforced() { + new_test_ext(Default::default()).execute_with(|| { + let para_id = ParaId::from(2000); + register_test_publisher(para_id); + + // Try to publish data that exceeds 2048 bytes total + // Each item is 32 (key) + 1024 (value) = 1056 bytes + // Two items would be 2112 bytes, exceeding the 2048 limit + let data1 = vec![(hash_key(b"key1"), vec![b'a'; 1024])]; + assert_ok!(Broadcaster::handle_publish(para_id, data1)); + + // Second item should fail due to total storage size + let data2 = vec![(hash_key(b"key2"), vec![b'b'; 1024])]; + let result = Broadcaster::handle_publish(para_id, data2); + assert_err!(result, Error::::TotalStorageSizeExceeded); + + // But updating the existing key with a smaller value should work + let data3 = vec![(hash_key(b"key1"), vec![b'c'; 100])]; + assert_ok!(Broadcaster::handle_publish(para_id, data3)); + + // Now we should have room for more data + let data4 = vec![(hash_key(b"key2"), vec![b'd'; 900])]; + assert_ok!(Broadcaster::handle_publish(para_id, data4)); + }); +} + #[test] fn max_stored_keys_limit_enforced() { new_test_ext(Default::default()).execute_with(|| { let para_id = ParaId::from(2000); register_test_publisher(para_id); - for batch in 0..7 { + // Publish 50 small items to test MaxStoredKeys without hitting TotalStorageSize limit + // Each item is 32 (key) + 1 (value) = 33 bytes, total ~1650 bytes + for batch in 0..4 { let mut data = Vec::new(); for i in 0..16 { let key_num = batch * 16 + i; - if key_num < 100 { - data.push((hash_key(&format!("key{}", key_num).into_bytes()), b"value".to_vec())); + if key_num < 50 { + data.push((hash_key(&format!("key{}", key_num).into_bytes()), b"v".to_vec())); } } if !data.is_empty() { @@ -291,7 +320,7 @@ fn 
max_stored_keys_limit_enforced() { } let published_keys = PublishedKeys::::get(para_id); - assert_eq!(published_keys.len(), 100); + assert_eq!(published_keys.len(), 50); let result = Broadcaster::handle_publish(para_id, vec![(hash_key(b"new_key"), b"value".to_vec())]); diff --git a/polkadot/runtime/parachains/src/mock.rs b/polkadot/runtime/parachains/src/mock.rs index 6877fb3412516..217d7dd3632ed 100644 --- a/polkadot/runtime/parachains/src/mock.rs +++ b/polkadot/runtime/parachains/src/mock.rs @@ -220,7 +220,8 @@ impl crate::shared::Config for Test { parameter_types! { pub const MaxPublishItems: u32 = 16; pub const MaxValueLength: u32 = 1024; - pub const MaxStoredKeys: u32 = 100; + pub const MaxStoredKeys: u32 = 50; + pub const MaxTotalStorageSize: u32 = 2048; // 2 KiB pub const MaxPublishers: u32 = 1000; } @@ -235,6 +236,7 @@ impl crate::broadcaster::Config for Test { type MaxPublishItems = MaxPublishItems; type MaxValueLength = MaxValueLength; type MaxStoredKeys = MaxStoredKeys; + type MaxTotalStorageSize = MaxTotalStorageSize; type MaxPublishers = MaxPublishers; type PublisherDeposit = PublisherDeposit; } diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index dcba3758906b0..38b1f6cefe6b9 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -1230,6 +1230,7 @@ parameter_types! 
{ pub const MaxPublishItems: u32 = 10; pub const MaxValueLength: u32 = 1024; pub const MaxStoredKeys: u32 = 100; + pub const MaxTotalStorageSize: u32 = 2048; // 2 KiB pub const MaxPublishers: u32 = 1000; pub const PublisherDeposit: Balance = 100 * UNITS; } @@ -1241,6 +1242,7 @@ impl parachains_broadcaster::Config for Runtime { type MaxPublishItems = MaxPublishItems; type MaxValueLength = MaxValueLength; type MaxStoredKeys = MaxStoredKeys; + type MaxTotalStorageSize = MaxTotalStorageSize; type MaxPublishers = MaxPublishers; type PublisherDeposit = PublisherDeposit; } diff --git a/polkadot/xcm/src/v5/mod.rs b/polkadot/xcm/src/v5/mod.rs index d1cd609b420a7..a80935f2581ee 100644 --- a/polkadot/xcm/src/v5/mod.rs +++ b/polkadot/xcm/src/v5/mod.rs @@ -1157,6 +1157,8 @@ pub enum Instruction { /// which are stored in child tries on the relay chain indexed by the publisher's ParaId. /// /// - `data`: The key-value pairs to be published, bounded by MaxPublishItems + /// - Keys: 32-byte hashes + /// - Values: Bounded by MaxPublishValueLength /// /// Safety: Origin must be a parachain (Sovereign Account). The relay chain will validate /// the origin and store data in the appropriate child trie. 
@@ -1166,7 +1168,7 @@ pub enum Instruction { /// Errors: /// - NoPermission: If origin is not authorized by the configured filter /// - BadOrigin: If origin is not a valid parachain - /// - PublishFailed: If the underlying handler fails (e.g., key/value too long, too many items) + /// - PublishFailed: If the underlying handler fails Publish { data: PublishData }, } From 9193e51513f49b5d0218855734951cc2be10cb91 Mon Sep 17 00:00:00 2001 From: metricaez Date: Sat, 13 Dec 2025 23:27:04 -0300 Subject: [PATCH 39/69] feat: early exit for empty data publishing --- .../runtime/parachains/src/broadcaster/mod.rs | 5 ++++ .../parachains/src/broadcaster/tests.rs | 28 +++++++++++-------- 2 files changed, 21 insertions(+), 12 deletions(-) diff --git a/polkadot/runtime/parachains/src/broadcaster/mod.rs b/polkadot/runtime/parachains/src/broadcaster/mod.rs index 568b3cfef3e24..12cda1edf9814 100644 --- a/polkadot/runtime/parachains/src/broadcaster/mod.rs +++ b/polkadot/runtime/parachains/src/broadcaster/mod.rs @@ -251,6 +251,8 @@ pub mod pallet { MustCleanupDataFirst, /// No published data to cleanup. NoDataToCleanup, + /// Cannot publish empty data. 
+ EmptyPublish, } #[pallet::hooks] @@ -567,6 +569,9 @@ pub mod pallet { Error::::PublishNotAuthorized ); + // Reject empty publishes to avoid wasting execution weight + ensure!(!data.is_empty(), Error::::EmptyPublish); + let items_count = data.len() as u32; // Validate input limits first before making any changes diff --git a/polkadot/runtime/parachains/src/broadcaster/tests.rs b/polkadot/runtime/parachains/src/broadcaster/tests.rs index e448e480acab6..980ca12bee234 100644 --- a/polkadot/runtime/parachains/src/broadcaster/tests.rs +++ b/polkadot/runtime/parachains/src/broadcaster/tests.rs @@ -229,18 +229,6 @@ fn publish_store_retrieve_and_update_data() { }); } -#[test] -fn empty_publish_still_creates_publisher() { - new_test_ext(Default::default()).execute_with(|| { - let para_id = ParaId::from(2000); - register_test_publisher(para_id); - - let _ = Broadcaster::handle_publish(para_id, vec![]); - - assert!(PublisherExists::::get(para_id)); - }); -} - #[test] fn handle_publish_respects_max_items_limit() { new_test_ext(Default::default()).execute_with(|| { @@ -753,3 +741,19 @@ fn cleanup_outgoing_publishers_works() { assert!(PublisherExists::::get(para_c)); }); } + +#[test] +fn empty_publish_fails() { + new_test_ext(Default::default()).execute_with(|| { + let para_id = ParaId::from(2000); + register_test_publisher(para_id); + + // Try to publish empty data + let empty_data: Vec<([u8; 32], Vec)> = vec![]; + + assert_err!( + Broadcaster::handle_publish(para_id, empty_data), + Error::::EmptyPublish + ); + }); +} From d1d8b5a76f723023abab76ddd0688b016ec22e3b Mon Sep 17 00:00:00 2001 From: metricaez Date: Sun, 14 Dec 2025 00:56:49 -0300 Subject: [PATCH 40/69] choir: same import syntax as crate --- polkadot/runtime/parachains/src/broadcaster/mod.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/polkadot/runtime/parachains/src/broadcaster/mod.rs b/polkadot/runtime/parachains/src/broadcaster/mod.rs index 12cda1edf9814..2898f75b917c9 100644 --- 
a/polkadot/runtime/parachains/src/broadcaster/mod.rs +++ b/polkadot/runtime/parachains/src/broadcaster/mod.rs @@ -286,7 +286,7 @@ pub mod pallet { /// Events: /// - `PublisherRegistered` #[pallet::call_index(0)] - #[pallet::weight(T::WeightInfo::register_publisher())] + #[pallet::weight(<T as Config>::WeightInfo::register_publisher())] pub fn register_publisher( origin: OriginFor<T>, para_id: ParaId, @@ -312,7 +312,7 @@ pub mod pallet { /// Events: /// - `PublisherRegistered` #[pallet::call_index(1)] - #[pallet::weight(T::WeightInfo::force_register_publisher())] + #[pallet::weight(<T as Config>::WeightInfo::force_register_publisher())] pub fn force_register_publisher( origin: OriginFor<T>, manager: T::AccountId, @@ -340,7 +340,7 @@ pub mod pallet { /// - `DataCleanedUp` #[pallet::call_index(2)] #[pallet::weight( - T::WeightInfo::do_cleanup_publisher(T::MaxStoredKeys::get()) + <T as Config>::WeightInfo::do_cleanup_publisher(T::MaxStoredKeys::get()) .saturating_add(T::DbWeight::get().reads(2)) )] pub fn cleanup_published_data( origin: OriginFor<T>, @@ -412,7 +412,7 @@ pub mod pallet { /// - `PublisherDeregistered` #[pallet::call_index(4)] #[pallet::weight( - T::WeightInfo::do_cleanup_publisher(T::MaxStoredKeys::get()) + <T as Config>::WeightInfo::do_cleanup_publisher(T::MaxStoredKeys::get()) .saturating_add(T::DbWeight::get().reads_writes(2, 1)) )] pub fn force_deregister_publisher( origin: OriginFor<T>, @@ -537,7 +537,7 @@ pub mod pallet { let published_keys = PublishedKeys::<T>::get(outgoing_para); let key_count = published_keys.len() as u32; let _ = Self::do_cleanup_publisher(*outgoing_para); - T::WeightInfo::do_cleanup_publisher(key_count) + <T as Config>::WeightInfo::do_cleanup_publisher(key_count) } else { Weight::zero() }; From 69fab79b1d91b5462992927f9257028641a89c56 Mon Sep 17 00:00:00 2001 From: metricaez Date: Sun, 14 Dec 2025 02:14:15 -0300 Subject: [PATCH 41/69] chore: unify duplicated sproof generation code on Subscriber --- .../pallets/subscriber/src/benchmarking.rs | 60 +--------------- cumulus/pallets/subscriber/src/mock.rs | 69 ++++++++++++-------
cumulus/pallets/subscriber/src/tests.rs | 16 ++--- 3 files changed, 54 insertions(+), 91 deletions(-) diff --git a/cumulus/pallets/subscriber/src/benchmarking.rs b/cumulus/pallets/subscriber/src/benchmarking.rs index a39a084e48e40..0cec1b4e4e2d1 100644 --- a/cumulus/pallets/subscriber/src/benchmarking.rs +++ b/cumulus/pallets/subscriber/src/benchmarking.rs @@ -18,65 +18,11 @@ #![cfg(feature = "runtime-benchmarks")] use super::*; -use crate::Pallet as Subscriber; -use codec::Encode; -use cumulus_pallet_parachain_system::RelayChainStateProof; +use crate::{mock::build_sproof_with_child_data, Pallet as Subscriber}; use cumulus_primitives_core::ParaId; use frame_benchmarking::v2::*; use frame_support::traits::Get; use frame_system::RawOrigin; -use sp_runtime::{traits::HashingFor, StateVersion}; -use sp_state_machine::{Backend, TrieBackendBuilder}; -use sp_trie::PrefixedMemoryDB; - -/// Build a relay chain state proof with child trie data for multiple publishers. -fn build_test_proof( - publishers: &[(ParaId, Vec<(Vec, Vec)>)], -) -> RelayChainStateProof { - let (db, root) = PrefixedMemoryDB::>::default_with_root(); - let state_version = StateVersion::default(); - let mut backend = TrieBackendBuilder::new(db, root).build(); - - let mut all_proofs = vec![]; - let mut main_trie_updates = vec![]; - - // Process each publisher - for (publisher_para_id, child_data) in publishers { - let child_info = sp_core::storage::ChildInfo::new_default(&(b"pubsub", *publisher_para_id).encode()); - - // Insert child trie data - let child_kv: Vec<_> = child_data.iter().map(|(k, v)| (k.clone(), Some(v.clone()))).collect(); - backend.insert(vec![(Some(child_info.clone()), child_kv)], state_version); - - // Get child trie root and prepare to insert it in main trie - let child_root = backend.child_storage_root(&child_info, core::iter::empty(), state_version).0; - let prefixed_key = child_info.prefixed_storage_key(); - main_trie_updates.push((prefixed_key.to_vec(), Some(child_root.encode()))); - 
- // Prove child trie keys - let child_keys: Vec<_> = child_data.iter().map(|(k, _)| k.clone()).collect(); - if !child_keys.is_empty() { - let child_proof = sp_state_machine::prove_child_read_on_trie_backend(&backend, &child_info, child_keys) - .expect("prove child read"); - all_proofs.push(child_proof); - } - } - - // Insert all child roots in main trie - backend.insert(vec![(None, main_trie_updates.clone())], state_version); - let root = *backend.root(); - - // Prove all child roots in main trie - let main_keys: Vec<_> = main_trie_updates.iter().map(|(k, _)| k.clone()).collect(); - let main_proof = sp_state_machine::prove_read_on_trie_backend(&backend, main_keys) - .expect("prove read"); - all_proofs.push(main_proof); - - // Merge all proofs - let proof = sp_trie::StorageProof::merge(all_proofs); - - RelayChainStateProof::new(ParaId::from(100), root, proof).expect("valid proof") -} /// Create test subscriptions for benchmarking. fn create_subscriptions(n: u32, keys_per_publisher: u32) -> Vec<(ParaId, Vec>)> { @@ -123,7 +69,7 @@ mod benchmarks { let publishers: Vec<_> = (0..n) .map(|i| (ParaId::from(1000 + i), vec![(vec![i as u8], vec![25u8])])) .collect(); - let proof = build_test_proof::(&publishers); + let proof = build_sproof_with_child_data(&publishers); #[block] { @@ -153,7 +99,7 @@ mod benchmarks { (para_id, child_data) }) .collect(); - let proof = build_test_proof::(&publishers); + let proof = build_sproof_with_child_data(&publishers); let current_roots = Subscriber::::collect_publisher_roots(&proof, &subscriptions); #[block] diff --git a/cumulus/pallets/subscriber/src/mock.rs b/cumulus/pallets/subscriber/src/mock.rs index b39a52c5968c3..0284178de96a0 100644 --- a/cumulus/pallets/subscriber/src/mock.rs +++ b/cumulus/pallets/subscriber/src/mock.rs @@ -58,10 +58,9 @@ pub fn new_test_ext() -> sp_io::TestExternalities { t.into() } -/// Minimal relay chain state proof builder for subscriber tests +/// Build a relay chain state proof with child trie data for 
multiple publishers. pub fn build_sproof_with_child_data( - publisher_para_id: ParaId, - child_data: Vec<(Vec, Vec)>, + publishers: &[(ParaId, Vec<(Vec, Vec)>)], ) -> RelayChainStateProof { use sp_runtime::traits::HashingFor; @@ -69,34 +68,52 @@ pub fn build_sproof_with_child_data( let state_version = StateVersion::default(); let mut backend = TrieBackendBuilder::new(db, root).build(); - // Derive child info same way as pallet - let child_info = sp_core::storage::ChildInfo::new_default(&(b"pubsub", publisher_para_id).encode()); - - // Insert child trie data - let child_kv: Vec<_> = child_data.iter().map(|(k, v)| (k.clone(), Some(v.clone()))).collect(); - backend.insert(vec![(Some(child_info.clone()), child_kv)], state_version); - - // Get child trie root and insert it in main trie - let child_root = backend.child_storage_root(&child_info, core::iter::empty(), state_version).0; - let prefixed_key = child_info.prefixed_storage_key(); - backend.insert( - vec![(None, vec![(prefixed_key.to_vec(), Some(child_root.encode()))])], - state_version, - ); + let mut all_proofs = vec![]; + let mut main_trie_updates = vec![]; + + // Process each publisher + for (publisher_para_id, child_data) in publishers { + let child_info = sp_core::storage::ChildInfo::new_default(&(b"pubsub", *publisher_para_id).encode()); + + // Insert child trie data + let child_kv: Vec<_> = child_data.iter().map(|(k, v)| (k.clone(), Some(v.clone()))).collect(); + backend.insert(vec![(Some(child_info.clone()), child_kv)], state_version); + + // Get child trie root and prepare to insert it in main trie + let child_root = backend.child_storage_root(&child_info, core::iter::empty(), state_version).0; + let prefixed_key = child_info.prefixed_storage_key(); + main_trie_updates.push((prefixed_key.to_vec(), Some(child_root.encode()))); + + // Prove child trie keys + let child_keys: Vec<_> = child_data.iter().map(|(k, _)| k.clone()).collect(); + if !child_keys.is_empty() { + let child_proof = 
sp_state_machine::prove_child_read_on_trie_backend(&backend, &child_info, child_keys) + .expect("prove child read"); + all_proofs.push(child_proof); + } + } + // Insert all child roots in main trie + backend.insert(vec![(None, main_trie_updates.clone())], state_version); let root = *backend.root(); - // Prove child trie keys - let child_keys: Vec<_> = child_data.iter().map(|(k, _)| k.clone()).collect(); - let child_proof = sp_state_machine::prove_child_read_on_trie_backend(&backend, &child_info, child_keys) - .expect("prove child read"); - - // Prove child root in main trie - let main_proof = sp_state_machine::prove_read_on_trie_backend(&backend, vec![prefixed_key.to_vec()]) + // Prove all child roots in main trie + let main_keys: Vec<_> = main_trie_updates.iter().map(|(k, _)| k.clone()).collect(); + let main_proof = sp_state_machine::prove_read_on_trie_backend(&backend, main_keys) .expect("prove read"); + all_proofs.push(main_proof); - // Merge proofs - let proof = StorageProof::merge(vec![main_proof, child_proof]); + // Merge all proofs + let proof = StorageProof::merge(all_proofs); RelayChainStateProof::new(ParaId::from(100), root, proof).expect("valid proof") } + +/// Build a relay chain state proof with child trie data for a single publisher. +/// This is a convenience wrapper for tests that only need one publisher. 
+pub fn build_test_proof( + publisher_para_id: ParaId, + child_data: Vec<(Vec, Vec)>, +) -> RelayChainStateProof { + build_sproof_with_child_data(&[(publisher_para_id, child_data)]) +} diff --git a/cumulus/pallets/subscriber/src/tests.rs b/cumulus/pallets/subscriber/src/tests.rs index c6a352e7787b1..88d2bd48fb1b8 100644 --- a/cumulus/pallets/subscriber/src/tests.rs +++ b/cumulus/pallets/subscriber/src/tests.rs @@ -19,7 +19,7 @@ fn process_relay_proof_keys_with_new_data_calls_handler() { TestSubscriptions::set(vec![(publisher, vec![key.clone()])]); - let proof = build_sproof_with_child_data(publisher, vec![(key.clone(), value.clone())]); + let proof = build_test_proof(publisher, vec![(key.clone(), value.clone())]); Pallet::::process_relay_proof_keys(&proof); @@ -37,7 +37,7 @@ fn process_empty_subscriptions() { ReceivedData::set(vec![]); TestSubscriptions::set(vec![]); - let proof = build_sproof_with_child_data(ParaId::from(1000), vec![]); + let proof = build_test_proof(ParaId::from(1000), vec![]); Pallet::::process_relay_proof_keys(&proof); @@ -57,13 +57,13 @@ fn root_change_triggers_processing() { TestSubscriptions::set(vec![(publisher, vec![key.clone()])]); // First block - let proof1 = build_sproof_with_child_data(publisher, vec![(key.clone(), value1.clone())]); + let proof1 = build_test_proof(publisher, vec![(key.clone(), value1.clone())]); Pallet::::process_relay_proof_keys(&proof1); assert_eq!(ReceivedData::get().len(), 1); // Second block with different value (root changed) ReceivedData::set(vec![]); - let proof2 = build_sproof_with_child_data(publisher, vec![(key.clone(), value2.clone())]); + let proof2 = build_test_proof(publisher, vec![(key.clone(), value2.clone())]); Pallet::::process_relay_proof_keys(&proof2); assert_eq!(ReceivedData::get().len(), 1); @@ -82,13 +82,13 @@ fn unchanged_root_skips_processing() { TestSubscriptions::set(vec![(publisher, vec![key.clone()])]); // First block - let proof = build_sproof_with_child_data(publisher, 
vec![(key.clone(), value.clone())]); + let proof = build_test_proof(publisher, vec![(key.clone(), value.clone())]); Pallet::::process_relay_proof_keys(&proof); assert_eq!(ReceivedData::get().len(), 1); // Second block with same data (unchanged root) ReceivedData::set(vec![]); - let proof2 = build_sproof_with_child_data(publisher, vec![(key.clone(), value)]); + let proof2 = build_test_proof(publisher, vec![(key.clone(), value)]); Pallet::::process_relay_proof_keys(&proof2); assert_eq!(ReceivedData::get().len(), 0, "Handler should not be called for unchanged root"); @@ -102,7 +102,7 @@ fn clear_stored_roots_extrinsic() { TestSubscriptions::set(vec![(publisher, vec![vec![0x01]])]); // Store a root for the publisher - let proof = build_sproof_with_child_data(publisher, vec![(vec![0x01], vec![0x11].encode())]); + let proof = build_test_proof(publisher, vec![(vec![0x01], vec![0x11].encode())]); Pallet::::process_relay_proof_keys(&proof); // Verify root is stored @@ -173,7 +173,7 @@ fn data_processed_event_emitted() { TestSubscriptions::set(vec![(publisher, vec![key.clone()])]); - let proof = build_sproof_with_child_data(publisher, vec![(key.clone(), value.clone())]); + let proof = build_test_proof(publisher, vec![(key.clone(), value.clone())]); Pallet::::process_relay_proof_keys(&proof); // value_size is the decoded Vec length, not the encoded length From 7f92c301c49229fdb9ceba7298a4ba8c7aa2118a Mon Sep 17 00:00:00 2001 From: metricaez Date: Sun, 14 Dec 2025 02:37:44 -0300 Subject: [PATCH 42/69] fix: fix Subscriber buggy benchmark --- cumulus/pallets/subscriber/src/benchmarking.rs | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/cumulus/pallets/subscriber/src/benchmarking.rs b/cumulus/pallets/subscriber/src/benchmarking.rs index 0cec1b4e4e2d1..ad01a4d5fa2ee 100644 --- a/cumulus/pallets/subscriber/src/benchmarking.rs +++ b/cumulus/pallets/subscriber/src/benchmarking.rs @@ -51,11 +51,14 @@ mod benchmarks { n: Linear<1, { 
T::MaxPublishers::get() }>, k: Linear<1, 10>, ) { - let _subscriptions = create_subscriptions(n, k); + let subscriptions = create_subscriptions(n, k); + crate::mock::TestSubscriptions::set(subscriptions); + let subs; #[block] { - let _subs = T::SubscriptionHandler::subscriptions(); + subs = T::SubscriptionHandler::subscriptions(); } + assert!(subs.len() == n as usize); } /// Benchmark collecting publisher roots from the relay state proof. @@ -70,11 +73,12 @@ mod benchmarks { .map(|i| (ParaId::from(1000 + i), vec![(vec![i as u8], vec![25u8])])) .collect(); let proof = build_sproof_with_child_data(&publishers); - + let roots; #[block] { - Subscriber::::collect_publisher_roots(&proof, &subscriptions); + roots = Subscriber::::collect_publisher_roots(&proof, &subscriptions); } + assert!(roots.len() == n as usize); } /// Benchmark processing published data from the relay proof. @@ -106,6 +110,7 @@ mod benchmarks { { Subscriber::::process_published_data(&proof, ¤t_roots, &subscriptions); } + assert!(PreviousPublishedDataRoots::::get().len() == n as usize); } #[benchmark] From 2fc10ef56267c6c966e0b41b3415f6b381e220dc Mon Sep 17 00:00:00 2001 From: metricaez Date: Sun, 14 Dec 2025 11:34:11 -0300 Subject: [PATCH 43/69] misc: build bench util --- pubsub-dev/build-benchmarks.sh | 57 ++++++++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) create mode 100755 pubsub-dev/build-benchmarks.sh diff --git a/pubsub-dev/build-benchmarks.sh b/pubsub-dev/build-benchmarks.sh new file mode 100755 index 0000000000000..f4883f3dd8db5 --- /dev/null +++ b/pubsub-dev/build-benchmarks.sh @@ -0,0 +1,57 @@ +#!/bin/bash + +export DYLD_LIBRARY_PATH=/Library/Developer/CommandLineTools/usr/lib +export SKIP_PALLET_REVIVE_FIXTURES=1 + +echo "🔨 Building Polkadot SDK binaries with runtime-benchmarks feature..." +echo + +# Build main polkadot binary with runtime-benchmarks +echo "📦 Building polkadot relay chain binary (with runtime-benchmarks)..." 
+cargo build --release -p polkadot --bin polkadot --features runtime-benchmarks +if [ $? -ne 0 ]; then + echo "❌ Failed to build polkadot binary" + exit 1 +fi +echo "✅ polkadot binary built successfully" +echo + +# Build PVF execute worker +echo "📦 Building polkadot-execute-worker..." +cargo build --release -p polkadot --bin polkadot-execute-worker +if [ $? -ne 0 ]; then + echo "❌ Failed to build polkadot-execute-worker" + exit 1 +fi +echo "✅ polkadot-execute-worker built successfully" +echo + +# Build PVF prepare worker +echo "📦 Building polkadot-prepare-worker..." +cargo build --release -p polkadot --bin polkadot-prepare-worker +if [ $? -ne 0 ]; then + echo "❌ Failed to build polkadot-prepare-worker" + exit 1 +fi +echo "✅ polkadot-prepare-worker built successfully" +echo + +# Build parachain binary with runtime-benchmarks +echo "📦 Building polkadot-parachain binary (with runtime-benchmarks)..." +cargo build --release -p polkadot-parachain-bin --bin polkadot-parachain --features runtime-benchmarks +if [ $? -ne 0 ]; then + echo "❌ Failed to build polkadot-parachain binary" + exit 1 +fi +echo "✅ polkadot-parachain binary built successfully" +echo + +echo "🎉 All binaries built successfully with runtime-benchmarks!" +echo +echo "📍 Binary locations:" +echo " - Relay chain: target/release/polkadot" +echo " - Execute worker: target/release/polkadot-execute-worker" +echo " - Prepare worker: target/release/polkadot-prepare-worker" +echo " - Parachain: target/release/polkadot-parachain" +echo +echo "🚀 Ready for benchmarking!" 
From c8b9c20888f2c41d81a58ff0c1d5f2f92af4ea22 Mon Sep 17 00:00:00 2001 From: metricaez Date: Mon, 15 Dec 2025 12:19:41 -0300 Subject: [PATCH 44/69] fix: subscriber should not try to benchmark the handlers --- cumulus/pallets/pubsub-consumer/src/lib.rs | 8 ++-- .../pallets/subscriber/src/benchmarking.rs | 21 +-------- cumulus/pallets/subscriber/src/lib.rs | 46 ++++++++++--------- cumulus/pallets/subscriber/src/mock.rs | 7 +-- 4 files changed, 36 insertions(+), 46 deletions(-) diff --git a/cumulus/pallets/pubsub-consumer/src/lib.rs b/cumulus/pallets/pubsub-consumer/src/lib.rs index 1a7990a2fdbf3..66669d6e7499d 100644 --- a/cumulus/pallets/pubsub-consumer/src/lib.rs +++ b/cumulus/pallets/pubsub-consumer/src/lib.rs @@ -29,7 +29,7 @@ pub use pallet::*; pub struct TestSubscriptionHandler(core::marker::PhantomData); impl cumulus_pallet_subscriber::SubscriptionHandler for TestSubscriptionHandler { - fn subscriptions() -> Vec<(ParaId, Vec>)> { + fn subscriptions() -> (Vec<(ParaId, Vec>)>, Weight) { // Subscribe to keys from publisher ParaId 1000 let key1 = alloc::vec![ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -44,10 +44,10 @@ impl cumulus_pallet_subscriber::SubscriptionHandler for TestSubscript 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, ]; - alloc::vec![(ParaId::from(1000), alloc::vec![key1, key2])] + (alloc::vec![(ParaId::from(1000), alloc::vec![key1, key2])], Weight::zero()) } - fn on_data_updated(publisher: ParaId, key: Vec, value: Vec) { + fn on_data_updated(publisher: ParaId, key: Vec, value: Vec) -> Weight { let bounded_key: BoundedVec> = key.clone().try_into().unwrap_or_default(); let bounded_value: BoundedVec> = @@ -60,6 +60,8 @@ impl cumulus_pallet_subscriber::SubscriptionHandler for TestSubscript key: bounded_key, value: bounded_value, }); + + Weight::zero() } } diff --git a/cumulus/pallets/subscriber/src/benchmarking.rs b/cumulus/pallets/subscriber/src/benchmarking.rs index ad01a4d5fa2ee..c3e1634e9d9e9 100644 --- 
a/cumulus/pallets/subscriber/src/benchmarking.rs +++ b/cumulus/pallets/subscriber/src/benchmarking.rs @@ -43,24 +43,6 @@ fn create_subscriptions(n: u32, keys_per_publisher: u32) -> Vec<(ParaId, Vec, - k: Linear<1, 10>, - ) { - let subscriptions = create_subscriptions(n, k); - crate::mock::TestSubscriptions::set(subscriptions); - let subs; - #[block] - { - subs = T::SubscriptionHandler::subscriptions(); - } - assert!(subs.len() == n as usize); - } - /// Benchmark collecting publisher roots from the relay state proof. /// /// Cost scales with the number of publishers `n`. @@ -106,9 +88,10 @@ mod benchmarks { let proof = build_sproof_with_child_data(&publishers); let current_roots = Subscriber::::collect_publisher_roots(&proof, &subscriptions); + let _weight; #[block] { - Subscriber::::process_published_data(&proof, ¤t_roots, &subscriptions); + _weight = Subscriber::::process_published_data(&proof, ¤t_roots, &subscriptions); } assert!(PreviousPublishedDataRoots::::get().len() == n as usize); } diff --git a/cumulus/pallets/subscriber/src/lib.rs b/cumulus/pallets/subscriber/src/lib.rs index 401e90883f56a..c63df05e0bbb4 100644 --- a/cumulus/pallets/subscriber/src/lib.rs +++ b/cumulus/pallets/subscriber/src/lib.rs @@ -49,10 +49,12 @@ mod tests; /// Define subscriptions and handle received data. pub trait SubscriptionHandler { /// List of subscriptions as (ParaId, keys) tuples. - fn subscriptions() -> Vec<(ParaId, Vec>)>; + /// Returns (subscriptions, weight) where weight is the cost of computing the subscriptions. + fn subscriptions() -> (Vec<(ParaId, Vec>)>, Weight); /// Called when subscribed data is updated. - fn on_data_updated(publisher: ParaId, key: Vec, value: Vec); + /// Returns the weight consumed by processing the data. + fn on_data_updated(publisher: ParaId, key: Vec, value: Vec) -> Weight; } #[frame_support::pallet] @@ -136,7 +138,8 @@ pub mod pallet { /// /// Returns a `RelayProofRequest` with child trie proof requests for subscribed data. 
pub fn get_relay_proof_requests() -> cumulus_primitives_core::RelayProofRequest { - let storage_keys = T::SubscriptionHandler::subscriptions() + let (subscriptions, _weight) = T::SubscriptionHandler::subscriptions(); + let storage_keys = subscriptions .into_iter() .flat_map(|(para_id, data_keys)| { let storage_key = Self::derive_storage_key(para_id); @@ -189,15 +192,17 @@ pub mod pallet { relay_state_proof: &RelayChainStateProof, current_roots: &Vec<(ParaId, Vec)>, subscriptions: &[(ParaId, Vec>)], - ) { + ) -> Weight { // Load roots from previous block for change detection. let previous_roots = >::get(); // Early exit if no publishers have any data. if current_roots.is_empty() && previous_roots.is_empty() { - return; + return T::DbWeight::get().reads(1); } + let mut total_handler_weight = Weight::zero(); + // Convert to map for efficient lookup by ParaId. let current_roots_map: BTreeMap> = current_roots.iter().map(|(para_id, root)| (*para_id, root.clone())).collect(); @@ -223,11 +228,12 @@ pub mod pallet { Ok(value) => { let value_size = value.len() as u32; // Notify handler of new data. - T::SubscriptionHandler::on_data_updated( + let handler_weight = T::SubscriptionHandler::on_data_updated( *publisher, key.clone(), value.clone(), ); + total_handler_weight = total_handler_weight.saturating_add(handler_weight); Self::deposit_event(Event::DataProcessed { publisher: *publisher, @@ -263,6 +269,8 @@ pub mod pallet { .try_into() .expect("MaxPublishers limit enforced in collect_publisher_roots; qed"); >::put(bounded_roots); + + total_handler_weight } } @@ -272,7 +280,7 @@ pub mod pallet { /// Note: This implementation only processes child trie keys (pubsub data). /// Main trie keys in the proof are intentionally ignored. 
fn process_relay_proof_keys(verified_proof: &RelayChainStateProof) -> Weight { - let subscriptions = T::SubscriptionHandler::subscriptions(); + let (subscriptions, subscriptions_weight) = T::SubscriptionHandler::subscriptions(); let num_publishers = subscriptions.len() as u32; let keys_per_publisher = subscriptions .first() @@ -280,35 +288,30 @@ pub mod pallet { .unwrap_or(0); let current_roots = Self::collect_publisher_roots(verified_proof, &subscriptions); - Self::process_published_data(verified_proof, ¤t_roots, &subscriptions); + let data_processing_weight = Self::process_published_data(verified_proof, ¤t_roots, &subscriptions); // Return total weight for all operations - T::WeightInfo::process_relay_proof_keys(num_publishers, keys_per_publisher) + subscriptions_weight + .saturating_add(data_processing_weight) + .saturating_add(T::WeightInfo::process_proof_excluding_handler(num_publishers, keys_per_publisher)) } } } pub trait WeightInfo { - fn get_subscriptions(n: u32, k: u32) -> Weight; fn collect_publisher_roots(n: u32) -> Weight; fn process_published_data(n: u32, k: u32) -> Weight; fn clear_stored_roots() -> Weight; - /// Total weight consumed by process_relay_proof_keys - /// Composes the weights of all sub-operations - fn process_relay_proof_keys(num_publishers: u32, keys_per_publisher: u32) -> Weight { - Self::get_subscriptions(num_publishers, keys_per_publisher) - .saturating_add(Self::collect_publisher_roots(num_publishers)) + /// Weight for processing relay proof excluding handler execution. + /// Benchmarked with no-op handler. Handler weights are added at runtime. 
+ fn process_proof_excluding_handler(num_publishers: u32, keys_per_publisher: u32) -> Weight { + Self::collect_publisher_roots(num_publishers) .saturating_add(Self::process_published_data(num_publishers, keys_per_publisher)) } } impl WeightInfo for () { - fn get_subscriptions(_n: u32, _k: u32) -> Weight { - // TODO: Replace with proper benchmarked weights - Weight::from_parts(5_000, 0) - } - fn collect_publisher_roots(_n: u32) -> Weight { // TODO: Replace with proper benchmarked weights Weight::from_parts(10_000, 0) @@ -320,6 +323,7 @@ impl WeightInfo for () { } fn clear_stored_roots() -> Weight { - frame_support::weights::constants::RocksDbWeight::get().reads_writes(1, 1) + // TODO: Replace with proper benchmarked weights + Weight::from_parts(50_000, 0) } } diff --git a/cumulus/pallets/subscriber/src/mock.rs b/cumulus/pallets/subscriber/src/mock.rs index 0284178de96a0..15791b47b2518 100644 --- a/cumulus/pallets/subscriber/src/mock.rs +++ b/cumulus/pallets/subscriber/src/mock.rs @@ -34,12 +34,13 @@ parameter_types! 
{ pub struct TestHandler; impl SubscriptionHandler for TestHandler { - fn subscriptions() -> Vec<(ParaId, Vec>)> { - TestSubscriptions::get() + fn subscriptions() -> (Vec<(ParaId, Vec>)>, Weight) { + (TestSubscriptions::get(), Weight::zero()) } - fn on_data_updated(publisher: ParaId, key: Vec, value: Vec) { + fn on_data_updated(publisher: ParaId, key: Vec, value: Vec) -> Weight { ReceivedData::mutate(|d| d.push((publisher, key, value))); + Weight::zero() } } From 47aada2f1c225c665d9a88e19cf61756daa91622 Mon Sep 17 00:00:00 2001 From: metricaez Date: Mon, 15 Dec 2025 14:23:15 -0300 Subject: [PATCH 45/69] feat: account for data value in benchmark --- .../pallets/subscriber/src/benchmarking.rs | 9 +++-- cumulus/pallets/subscriber/src/lib.rs | 36 +++++++++++-------- 2 files changed, 28 insertions(+), 17 deletions(-) diff --git a/cumulus/pallets/subscriber/src/benchmarking.rs b/cumulus/pallets/subscriber/src/benchmarking.rs index c3e1634e9d9e9..279f099aa2bbf 100644 --- a/cumulus/pallets/subscriber/src/benchmarking.rs +++ b/cumulus/pallets/subscriber/src/benchmarking.rs @@ -66,18 +66,22 @@ mod benchmarks { /// Benchmark processing published data from the relay proof. /// - /// Worst case: all `n` publishers have updated data with `k` keys each that need processing. + /// Each value has size `s` bytes. Max is 2048 bytes (2KiB limit per publisher). 
#[benchmark] fn process_published_data( n: Linear<1, { T::MaxPublishers::get() }>, k: Linear<1, 10>, + s: Linear<1, 2048>, ) { let subscriptions = create_subscriptions(n, k); + // Calculate per-key value size to stay within 2KiB total per publisher + let value_size_per_key = (s / k.max(1)) as usize; let publishers: Vec<_> = (0..n) .map(|i| { let para_id = ParaId::from(1000 + i); let child_data: Vec<(Vec, Vec)> = (0..k) .map(|j| { - let value = vec![25u8; 100]; + let value = vec![25u8; value_size_per_key]; let encoded_value = value.encode(); (vec![i as u8, j as u8], encoded_value) }) @@ -88,10 +92,9 @@ mod benchmarks { let proof = build_sproof_with_child_data(&publishers); let current_roots = Subscriber::::collect_publisher_roots(&proof, &subscriptions); - let _weight; #[block] { - _weight = Subscriber::::process_published_data(&proof, ¤t_roots, &subscriptions); + let _ = Subscriber::::process_published_data(&proof, ¤t_roots, &subscriptions); } assert!(PreviousPublishedDataRoots::::get().len() == n as usize); } diff --git a/cumulus/pallets/subscriber/src/lib.rs b/cumulus/pallets/subscriber/src/lib.rs index c63df05e0bbb4..66e057d0aa955 100644 --- a/cumulus/pallets/subscriber/src/lib.rs +++ b/cumulus/pallets/subscriber/src/lib.rs @@ -192,16 +192,17 @@ pub mod pallet { relay_state_proof: &RelayChainStateProof, current_roots: &Vec<(ParaId, Vec)>, subscriptions: &[(ParaId, Vec>)], - ) -> Weight { + ) -> (Weight, u32) { // Load roots from previous block for change detection. let previous_roots = >::get(); // Early exit if no publishers have any data. if current_roots.is_empty() && previous_roots.is_empty() { - return T::DbWeight::get().reads(1); + return (T::DbWeight::get().reads(1), 0); } let mut total_handler_weight = Weight::zero(); + let mut total_bytes_decoded = 0u32; // Convert to map for efficient lookup by ParaId. 
let current_roots_map: BTreeMap> = @@ -224,9 +225,13 @@ pub mod pallet { for key in subscription_keys.iter() { match relay_state_proof.read_child_storage(&child_info, key) { Ok(Some(encoded_value)) => { + let encoded_size = encoded_value.len() as u32; + total_bytes_decoded = total_bytes_decoded.saturating_add(encoded_size); + match Vec::::decode(&mut &encoded_value[..]) { Ok(value) => { let value_size = value.len() as u32; + // Notify handler of new data. let handler_weight = T::SubscriptionHandler::on_data_updated( *publisher, @@ -270,7 +275,7 @@ pub mod pallet { .expect("MaxPublishers limit enforced in collect_publisher_roots; qed"); >::put(bounded_roots); - total_handler_weight + (total_handler_weight, total_bytes_decoded) } } @@ -282,32 +287,34 @@ pub mod pallet { fn process_relay_proof_keys(verified_proof: &RelayChainStateProof) -> Weight { let (subscriptions, subscriptions_weight) = T::SubscriptionHandler::subscriptions(); let num_publishers = subscriptions.len() as u32; - let keys_per_publisher = subscriptions - .first() - .map(|(_, keys)| keys.len() as u32) - .unwrap_or(0); + let total_keys = subscriptions.iter().map(|(_, keys)| keys.len() as u32).sum(); let current_roots = Self::collect_publisher_roots(verified_proof, &subscriptions); - let data_processing_weight = Self::process_published_data(verified_proof, ¤t_roots, &subscriptions); + let (handler_weight, total_bytes_decoded) = Self::process_published_data(verified_proof, ¤t_roots, &subscriptions); // Return total weight for all operations subscriptions_weight - .saturating_add(data_processing_weight) - .saturating_add(T::WeightInfo::process_proof_excluding_handler(num_publishers, keys_per_publisher)) + .saturating_add(handler_weight) + .saturating_add(T::WeightInfo::process_proof_excluding_handler(num_publishers, total_keys, total_bytes_decoded)) } } } pub trait WeightInfo { fn collect_publisher_roots(n: u32) -> Weight; - fn process_published_data(n: u32, k: u32) -> Weight; + fn 
process_published_data(n: u32, k: u32, s: u32) -> Weight; fn clear_stored_roots() -> Weight; /// Weight for processing relay proof excluding handler execution. /// Benchmarked with no-op handler. Handler weights are added at runtime. - fn process_proof_excluding_handler(num_publishers: u32, keys_per_publisher: u32) -> Weight { + /// + /// Parameters: + /// - `num_publishers`: Number of publishers being processed + /// - `num_keys`: Total number of keys across all publishers + /// - `total_bytes`: Total bytes of data being decoded + fn process_proof_excluding_handler(num_publishers: u32, num_keys: u32, total_bytes: u32) -> Weight { Self::collect_publisher_roots(num_publishers) - .saturating_add(Self::process_published_data(num_publishers, keys_per_publisher)) + .saturating_add(Self::process_published_data(num_publishers, num_keys, total_bytes)) } } @@ -317,8 +324,9 @@ impl WeightInfo for () { Weight::from_parts(10_000, 0) } - fn process_published_data(_n: u32, _k: u32) -> Weight { + fn process_published_data(_n: u32, _k: u32, _s: u32) -> Weight { // TODO: Replace with proper benchmarked weights + // Note: Real benchmarks will add per-byte overhead for _s Weight::from_parts(50_000, 0) } From a89d0751d19a66155b8dca2e860202c939498d0f Mon Sep 17 00:00:00 2001 From: metricaez Date: Mon, 15 Dec 2025 16:00:49 -0300 Subject: [PATCH 46/69] feat: test utils and small optimization of Subs --- .../pallets/subscriber/src/benchmarking.rs | 2 +- cumulus/pallets/subscriber/src/lib.rs | 19 +++--- cumulus/pallets/subscriber/src/mock.rs | 66 +------------------ cumulus/pallets/subscriber/src/test_util.rs | 62 +++++++++++++++++ cumulus/pallets/subscriber/src/tests.rs | 10 ++- 5 files changed, 82 insertions(+), 77 deletions(-) create mode 100644 cumulus/pallets/subscriber/src/test_util.rs diff --git a/cumulus/pallets/subscriber/src/benchmarking.rs b/cumulus/pallets/subscriber/src/benchmarking.rs index 279f099aa2bbf..9e65b393f9b05 100644 --- 
a/cumulus/pallets/subscriber/src/benchmarking.rs +++ b/cumulus/pallets/subscriber/src/benchmarking.rs @@ -18,7 +18,7 @@ #![cfg(feature = "runtime-benchmarks")] use super::*; -use crate::{mock::build_sproof_with_child_data, Pallet as Subscriber}; +use crate::{test_util::build_sproof_with_child_data, Pallet as Subscriber}; use cumulus_primitives_core::ParaId; use frame_benchmarking::v2::*; use frame_support::traits::Get; diff --git a/cumulus/pallets/subscriber/src/lib.rs b/cumulus/pallets/subscriber/src/lib.rs index 66e057d0aa955..80984ae2c7f92 100644 --- a/cumulus/pallets/subscriber/src/lib.rs +++ b/cumulus/pallets/subscriber/src/lib.rs @@ -42,6 +42,9 @@ pub use pallet::*; #[cfg(feature = "runtime-benchmarks")] mod benchmarking; +#[cfg(any(test, feature = "runtime-benchmarks"))] +mod test_util; +#[cfg(test)] mod mock; #[cfg(test)] mod tests; @@ -171,7 +174,7 @@ pub mod pallet { pub fn collect_publisher_roots( relay_state_proof: &RelayChainStateProof, subscriptions: &[(ParaId, Vec>)], - ) -> Vec<(ParaId, Vec)> { + ) -> BTreeMap> { subscriptions .iter() .take(T::MaxPublishers::get() as usize) @@ -190,7 +193,7 @@ pub mod pallet { pub fn process_published_data( relay_state_proof: &RelayChainStateProof, - current_roots: &Vec<(ParaId, Vec)>, + current_roots: &BTreeMap>, subscriptions: &[(ParaId, Vec>)], ) -> (Weight, u32) { // Load roots from previous block for change detection. @@ -204,14 +207,10 @@ pub mod pallet { let mut total_handler_weight = Weight::zero(); let mut total_bytes_decoded = 0u32; - // Convert to map for efficient lookup by ParaId. - let current_roots_map: BTreeMap> = - current_roots.iter().map(|(para_id, root)| (*para_id, root.clone())).collect(); - // Process each subscription. for (publisher, subscription_keys) in subscriptions { // Check if publisher has published data in this block. 
- if let Some(current_root) = current_roots_map.get(publisher) { + if let Some(current_root) = current_roots.get(publisher) { // Detect if child trie root changed since last block. let should_update = previous_roots .get(publisher) @@ -265,10 +264,10 @@ pub mod pallet { // Store current roots for next block's comparison. let bounded_roots: BoundedBTreeMap>, T::MaxPublishers> = - current_roots_map - .into_iter() + current_roots + .iter() .filter_map(|(para_id, root)| { - BoundedVec::try_from(root).ok().map(|bounded_root| (para_id, bounded_root)) + BoundedVec::try_from(root.clone()).ok().map(|bounded_root| (*para_id, bounded_root)) }) .collect::>() .try_into() diff --git a/cumulus/pallets/subscriber/src/mock.rs b/cumulus/pallets/subscriber/src/mock.rs index 15791b47b2518..c7120df75f407 100644 --- a/cumulus/pallets/subscriber/src/mock.rs +++ b/cumulus/pallets/subscriber/src/mock.rs @@ -4,13 +4,9 @@ #![cfg(test)] use super::*; -use codec::Encode; -use cumulus_pallet_parachain_system::RelayChainStateProof; use cumulus_primitives_core::ParaId; use frame_support::{derive_impl, parameter_types}; -use sp_runtime::{BuildStorage, StateVersion}; -use sp_state_machine::{Backend, TrieBackendBuilder}; -use sp_trie::{PrefixedMemoryDB, StorageProof}; +use sp_runtime::BuildStorage; type Block = frame_system::mocking::MockBlock; @@ -58,63 +54,3 @@ pub fn new_test_ext() -> sp_io::TestExternalities { let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); t.into() } - -/// Build a relay chain state proof with child trie data for multiple publishers. 
-pub fn build_sproof_with_child_data( - publishers: &[(ParaId, Vec<(Vec, Vec)>)], -) -> RelayChainStateProof { - use sp_runtime::traits::HashingFor; - - let (db, root) = PrefixedMemoryDB::>::default_with_root(); - let state_version = StateVersion::default(); - let mut backend = TrieBackendBuilder::new(db, root).build(); - - let mut all_proofs = vec![]; - let mut main_trie_updates = vec![]; - - // Process each publisher - for (publisher_para_id, child_data) in publishers { - let child_info = sp_core::storage::ChildInfo::new_default(&(b"pubsub", *publisher_para_id).encode()); - - // Insert child trie data - let child_kv: Vec<_> = child_data.iter().map(|(k, v)| (k.clone(), Some(v.clone()))).collect(); - backend.insert(vec![(Some(child_info.clone()), child_kv)], state_version); - - // Get child trie root and prepare to insert it in main trie - let child_root = backend.child_storage_root(&child_info, core::iter::empty(), state_version).0; - let prefixed_key = child_info.prefixed_storage_key(); - main_trie_updates.push((prefixed_key.to_vec(), Some(child_root.encode()))); - - // Prove child trie keys - let child_keys: Vec<_> = child_data.iter().map(|(k, _)| k.clone()).collect(); - if !child_keys.is_empty() { - let child_proof = sp_state_machine::prove_child_read_on_trie_backend(&backend, &child_info, child_keys) - .expect("prove child read"); - all_proofs.push(child_proof); - } - } - - // Insert all child roots in main trie - backend.insert(vec![(None, main_trie_updates.clone())], state_version); - let root = *backend.root(); - - // Prove all child roots in main trie - let main_keys: Vec<_> = main_trie_updates.iter().map(|(k, _)| k.clone()).collect(); - let main_proof = sp_state_machine::prove_read_on_trie_backend(&backend, main_keys) - .expect("prove read"); - all_proofs.push(main_proof); - - // Merge all proofs - let proof = StorageProof::merge(all_proofs); - - RelayChainStateProof::new(ParaId::from(100), root, proof).expect("valid proof") -} - -/// Build a relay chain 
state proof with child trie data for a single publisher. -/// This is a convenience wrapper for tests that only need one publisher. -pub fn build_test_proof( - publisher_para_id: ParaId, - child_data: Vec<(Vec, Vec)>, -) -> RelayChainStateProof { - build_sproof_with_child_data(&[(publisher_para_id, child_data)]) -} diff --git a/cumulus/pallets/subscriber/src/test_util.rs b/cumulus/pallets/subscriber/src/test_util.rs new file mode 100644 index 0000000000000..3dc0086141123 --- /dev/null +++ b/cumulus/pallets/subscriber/src/test_util.rs @@ -0,0 +1,62 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +#![cfg(any(test, feature = "runtime-benchmarks"))] + +use codec::Encode; +use cumulus_pallet_parachain_system::RelayChainStateProof; +use cumulus_primitives_core::ParaId; +use sp_runtime::StateVersion; +use sp_state_machine::{Backend, TrieBackendBuilder}; +use sp_trie::{PrefixedMemoryDB, StorageProof}; + +/// Build a relay chain state proof with child trie data for multiple publishers. 
+pub fn build_sproof_with_child_data( + publishers: &[(ParaId, Vec<(Vec, Vec)>)], +) -> RelayChainStateProof { + use sp_runtime::traits::HashingFor; + + let (db, root) = PrefixedMemoryDB::>::default_with_root(); + let state_version = StateVersion::default(); + let mut backend = TrieBackendBuilder::new(db, root).build(); + + let mut all_proofs = vec![]; + let mut main_trie_updates = vec![]; + + // Process each publisher + for (publisher_para_id, child_data) in publishers { + let child_info = sp_core::storage::ChildInfo::new_default(&(b"pubsub", *publisher_para_id).encode()); + + // Insert child trie data + let child_kv: Vec<_> = child_data.iter().map(|(k, v)| (k.clone(), Some(v.clone()))).collect(); + backend.insert(vec![(Some(child_info.clone()), child_kv)], state_version); + + // Get child trie root and prepare to insert it in main trie + let child_root = backend.child_storage_root(&child_info, core::iter::empty(), state_version).0; + let prefixed_key = child_info.prefixed_storage_key(); + main_trie_updates.push((prefixed_key.to_vec(), Some(child_root.encode()))); + + // Prove child trie keys + let child_keys: Vec<_> = child_data.iter().map(|(k, _)| k.clone()).collect(); + if !child_keys.is_empty() { + let child_proof = sp_state_machine::prove_child_read_on_trie_backend(&backend, &child_info, child_keys) + .expect("prove child read"); + all_proofs.push(child_proof); + } + } + + // Insert all child roots in main trie + backend.insert(vec![(None, main_trie_updates.clone())], state_version); + let root = *backend.root(); + + // Prove all child roots in main trie + let main_keys: Vec<_> = main_trie_updates.iter().map(|(k, _)| k.clone()).collect(); + let main_proof = sp_state_machine::prove_read_on_trie_backend(&backend, main_keys) + .expect("prove read"); + all_proofs.push(main_proof); + + // Merge all proofs + let proof = StorageProof::merge(all_proofs); + + RelayChainStateProof::new(ParaId::from(100), root, proof).expect("valid proof") +} diff --git 
a/cumulus/pallets/subscriber/src/tests.rs b/cumulus/pallets/subscriber/src/tests.rs index 88d2bd48fb1b8..f4667ec55ca89 100644 --- a/cumulus/pallets/subscriber/src/tests.rs +++ b/cumulus/pallets/subscriber/src/tests.rs @@ -4,11 +4,19 @@ #![cfg(test)] use super::*; -use crate::mock::*; +use crate::{mock::*, test_util::build_sproof_with_child_data}; use codec::Encode; use cumulus_primitives_core::ParaId; use frame_support::assert_ok; +/// Build a relay chain state proof with child trie data for a single publisher. +fn build_test_proof( + publisher_para_id: ParaId, + child_data: Vec<(Vec, Vec)>, +) -> cumulus_pallet_parachain_system::RelayChainStateProof { + build_sproof_with_child_data(&[(publisher_para_id, child_data)]) +} + #[test] fn process_relay_proof_keys_with_new_data_calls_handler() { new_test_ext().execute_with(|| { From 6177be1a4e80e870987696106636facb92167458 Mon Sep 17 00:00:00 2001 From: metricaez Date: Mon, 15 Dec 2025 17:10:55 -0300 Subject: [PATCH 47/69] chore: some relevant comments on numbers and data --- cumulus/pallets/subscriber/src/benchmarking.rs | 10 +++++++--- polkadot/runtime/parachains/src/broadcaster/mod.rs | 1 + 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/cumulus/pallets/subscriber/src/benchmarking.rs b/cumulus/pallets/subscriber/src/benchmarking.rs index 9e65b393f9b05..d7b1eb8c409dd 100644 --- a/cumulus/pallets/subscriber/src/benchmarking.rs +++ b/cumulus/pallets/subscriber/src/benchmarking.rs @@ -65,8 +65,12 @@ mod benchmarks { /// Benchmark processing published data from the relay proof. /// - /// Worst case: all `n` publishers have updated data with `k` keys each that need processing. - /// Each value has size `s` bytes. Max is 2048 bytes (2KiB limit per publisher). + /// Worst case: all publishers have updated data requiring processing. 
+ /// + /// Parameters: + /// - `n`: Number of publishers with updated data + /// - `k`: Number of keys per publisher + /// - `s`: Total encoded bytes per publisher (max 2KiB) #[benchmark] fn process_published_data( n: Linear<1, { T::MaxPublishers::get() }>, @@ -74,7 +78,7 @@ k: Linear<1, 10>, s: Linear<1, 2048>, ) { let subscriptions = create_subscriptions(n, k); - // Calculate per-key value size to stay within 2KiB total per publisher + // SCALE encoding overhead (1-4 bytes) ignored as negligible compared to data benchmark ranges let value_size_per_key = (s / k.max(1)) as usize; let publishers: Vec<_> = (0..n) .map(|i| { diff --git a/polkadot/runtime/parachains/src/broadcaster/mod.rs b/polkadot/runtime/parachains/src/broadcaster/mod.rs index 2898f75b917c9..74c8601c71552 100644 --- a/polkadot/runtime/parachains/src/broadcaster/mod.rs +++ b/polkadot/runtime/parachains/src/broadcaster/mod.rs @@ -610,6 +610,7 @@ pub mod pallet { let mut size_delta: i64 = 0; for (key, value) in &data { + // 32 bytes for the hash key let new_size = 32u32.saturating_add(value.len() as u32); // If key already exists, subtract old value size From 627717e6ecdeaddd222c6a13627d33388a1f8e24 Mon Sep 17 00:00:00 2001 From: metricaez Date: Mon, 15 Dec 2025 19:28:37 -0300 Subject: [PATCH 48/69] chore: simpler cargo of sub --- cumulus/pallets/subscriber/Cargo.toml | 32 +++++++++------------------ 1 file changed, 11 insertions(+), 21 deletions(-) diff --git a/cumulus/pallets/subscriber/Cargo.toml b/cumulus/pallets/subscriber/Cargo.toml index 98574da962399..b6e157da97002 100644 --- a/cumulus/pallets/subscriber/Cargo.toml +++ b/cumulus/pallets/subscriber/Cargo.toml @@ -24,18 +24,12 @@ cumulus-pallet-parachain-system = { workspace = true } cumulus-primitives-core = { workspace = true } # Benchmarking -frame-benchmarking = { optional = true, workspace = true } -polkadot-primitives = { optional = true, workspace = true } -sp-runtime = { optional = true, workspace = true } -sp-state-machine = { optional = 
true, workspace = true } -sp-trie = { optional = true, workspace = true } - -[dev-dependencies] -polkadot-primitives = { workspace = true, default-features = true } -sp-io = { workspace = true, default-features = true } -sp-runtime = { workspace = true, default-features = true } -sp-state-machine = { workspace = true, default-features = true } -sp-trie = { workspace = true, default-features = true } +frame-benchmarking = { workspace = true } +polkadot-primitives = { workspace = true } +sp-runtime = { workspace = true } +sp-state-machine = { workspace = true } +sp-trie = { workspace = true } +sp-io = { workspace = true } [features] default = ["std"] @@ -43,24 +37,20 @@ std = [ "codec/std", "cumulus-pallet-parachain-system/std", "cumulus-primitives-core/std", - "frame-benchmarking?/std", + "frame-benchmarking/std", "frame-support/std", "frame-system/std", - "polkadot-primitives?/std", + "polkadot-primitives/std", "scale-info/std", "sp-core/std", - "sp-runtime?/std", - "sp-state-machine?/std", + "sp-runtime/std", + "sp-state-machine/std", "sp-std/std", - "sp-trie?/std", + "sp-trie/std", ] runtime-benchmarks = [ "cumulus-pallet-parachain-system/runtime-benchmarks", "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", - "polkadot-primitives", - "sp-runtime", - "sp-state-machine", - "sp-trie", ] From d21b1ff4f6386c8d77ae9cd4887be9a09ea911dc Mon Sep 17 00:00:00 2001 From: metricaez Date: Mon, 15 Dec 2025 20:39:59 -0300 Subject: [PATCH 49/69] fix: separation of test utils and empty proof for bench --- .../pallets/subscriber/src/benchmarking.rs | 34 +++++++++++++++---- cumulus/pallets/subscriber/src/test_util.rs | 2 +- 2 files changed, 28 insertions(+), 8 deletions(-) diff --git a/cumulus/pallets/subscriber/src/benchmarking.rs b/cumulus/pallets/subscriber/src/benchmarking.rs index d7b1eb8c409dd..419dbc2fbdf45 100644 --- a/cumulus/pallets/subscriber/src/benchmarking.rs +++ 
b/cumulus/pallets/subscriber/src/benchmarking.rs @@ -18,11 +18,13 @@ #![cfg(feature = "runtime-benchmarks")] use super::*; -use crate::{test_util::build_sproof_with_child_data, Pallet as Subscriber}; +use crate::Pallet as Subscriber; +use cumulus_pallet_parachain_system::RelayChainStateProof; use cumulus_primitives_core::ParaId; use frame_benchmarking::v2::*; use frame_support::traits::Get; use frame_system::RawOrigin; +use sp_trie::StorageProof; /// Create test subscriptions for benchmarking. fn create_subscriptions(n: u32, keys_per_publisher: u32) -> Vec<(ParaId, Vec>)> { @@ -39,6 +41,20 @@ fn create_subscriptions(n: u32, keys_per_publisher: u32) -> Vec<(ParaId, Vec RelayChainStateProof { + use sp_runtime::traits::BlakeTwo256; + use sp_trie::{empty_trie_root, LayoutV1}; + + let proof = StorageProof::empty(); + let root = empty_trie_root::>(); + RelayChainStateProof::new(ParaId::from(100), root.into(), proof).expect("valid proof") +} + #[benchmarks] mod benchmarks { use super::*; @@ -51,16 +67,18 @@ mod benchmarks { n: Linear<1, { T::MaxPublishers::get() }>, ) { let subscriptions = create_subscriptions(n, 1); - let publishers: Vec<_> = (0..n) + let _publishers: Vec<_> = (0..n) .map(|i| (ParaId::from(1000 + i), vec![(vec![i as u8], vec![25u8])])) .collect(); - let proof = build_sproof_with_child_data(&publishers); + // TODO: Use _publishers data to build proof once we have values + let proof = benchmark_relay_proof(); let roots; #[block] { roots = Subscriber::::collect_publisher_roots(&proof, &subscriptions); } - assert!(roots.len() == n as usize); + // TODO: Update assertion once proof contains actual data + //assert!(roots.len() <= n as usize); } /// Benchmark processing published data from the relay proof. 
@@ -80,7 +98,7 @@ mod benchmarks { let subscriptions = create_subscriptions(n, k); // SCALE encoding overhead (1-4 bytes) ignored as negligible compared to data benchmark ranges let value_size_per_key = (s / k.max(1)) as usize; - let publishers: Vec<_> = (0..n) + let _publishers: Vec<_> = (0..n) .map(|i| { let para_id = ParaId::from(1000 + i); let child_data: Vec<(Vec, Vec)> = (0..k) @@ -93,14 +111,16 @@ mod benchmarks { (para_id, child_data) }) .collect(); - let proof = build_sproof_with_child_data(&publishers); + // TODO: Use _publishers data to build proof once we have values + let proof = benchmark_relay_proof(); let current_roots = Subscriber::::collect_publisher_roots(&proof, &subscriptions); #[block] { let _ = Subscriber::::process_published_data(&proof, ¤t_roots, &subscriptions); } - assert!(PreviousPublishedDataRoots::::get().len() == n as usize); + // TODO: Update assertion once proof contains actual data + //assert!(PreviousPublishedDataRoots::::get().len() <= n as usize); } #[benchmark] diff --git a/cumulus/pallets/subscriber/src/test_util.rs b/cumulus/pallets/subscriber/src/test_util.rs index 3dc0086141123..7f7f7259bb0a2 100644 --- a/cumulus/pallets/subscriber/src/test_util.rs +++ b/cumulus/pallets/subscriber/src/test_util.rs @@ -1,7 +1,7 @@ // Copyright (C) Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 -#![cfg(any(test, feature = "runtime-benchmarks"))] +#![cfg(test)] use codec::Encode; use cumulus_pallet_parachain_system::RelayChainStateProof; From 928081d98b59c5611edfd1ea5b4421e6c1c98f98 Mon Sep 17 00:00:00 2001 From: metricaez Date: Mon, 15 Dec 2025 20:41:47 -0300 Subject: [PATCH 50/69] chore: unnecessary flag --- cumulus/pallets/subscriber/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cumulus/pallets/subscriber/src/lib.rs b/cumulus/pallets/subscriber/src/lib.rs index 80984ae2c7f92..67b6997df7bdc 100644 --- a/cumulus/pallets/subscriber/src/lib.rs +++ b/cumulus/pallets/subscriber/src/lib.rs @@ -42,7 +42,7 @@ pub use pallet::*; #[cfg(feature = "runtime-benchmarks")] mod benchmarking; -#[cfg(any(test, feature = "runtime-benchmarks"))] +#[cfg(test)] mod test_util; #[cfg(test)] mod mock; From 3d34371994485fcf759a4c91c75d1010b2bbcbbc Mon Sep 17 00:00:00 2001 From: metricaez Date: Mon, 15 Dec 2025 22:35:00 -0300 Subject: [PATCH 51/69] feat: no std proof generation and ah westend benchmark --- Cargo.lock | 3 + cumulus/pallets/subscriber/Cargo.toml | 9 + .../pallets/subscriber/src/benchmarking.rs | 32 +-- cumulus/pallets/subscriber/src/lib.rs | 2 +- cumulus/pallets/subscriber/src/test_util.rs | 199 +++++++++++++----- .../assets/asset-hub-westend/Cargo.toml | 4 + .../assets/asset-hub-westend/src/lib.rs | 22 ++ .../src/weights/cumulus_pallet_subscriber.rs | 90 ++++++++ .../asset-hub-westend/src/weights/mod.rs | 1 + 9 files changed, 288 insertions(+), 74 deletions(-) create mode 100644 cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_subscriber.rs diff --git a/Cargo.lock b/Cargo.lock index 3089aff2c56cc..adb3f03b26f25 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1371,6 +1371,7 @@ dependencies = [ "cumulus-pallet-aura-ext", "cumulus-pallet-parachain-system", "cumulus-pallet-session-benchmarking", + "cumulus-pallet-subscriber", "cumulus-pallet-weight-reclaim", 
"cumulus-pallet-xcm", "cumulus-pallet-xcmp-queue", @@ -4790,6 +4791,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", + "hash-db", "parity-scale-codec", "polkadot-primitives", "scale-info", @@ -4799,6 +4801,7 @@ dependencies = [ "sp-state-machine", "sp-std 14.0.0", "sp-trie", + "trie-db", ] [[package]] diff --git a/cumulus/pallets/subscriber/Cargo.toml b/cumulus/pallets/subscriber/Cargo.toml index b6e157da97002..4dc03f3517939 100644 --- a/cumulus/pallets/subscriber/Cargo.toml +++ b/cumulus/pallets/subscriber/Cargo.toml @@ -25,11 +25,13 @@ cumulus-primitives-core = { workspace = true } # Benchmarking frame-benchmarking = { workspace = true } +hash-db = { workspace = true } polkadot-primitives = { workspace = true } sp-runtime = { workspace = true } sp-state-machine = { workspace = true } sp-trie = { workspace = true } sp-io = { workspace = true } +trie-db = { workspace = true } [features] default = ["std"] @@ -40,6 +42,7 @@ std = [ "frame-benchmarking/std", "frame-support/std", "frame-system/std", + "hash-db/std", "polkadot-primitives/std", "scale-info/std", "sp-core/std", @@ -47,6 +50,7 @@ std = [ "sp-state-machine/std", "sp-std/std", "sp-trie/std", + "trie-db/std", ] runtime-benchmarks = [ "cumulus-pallet-parachain-system/runtime-benchmarks", @@ -54,3 +58,8 @@ runtime-benchmarks = [ "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", ] +try-runtime = [ + "cumulus-pallet-parachain-system/try-runtime", + "frame-support/try-runtime", + "frame-system/try-runtime", +] diff --git a/cumulus/pallets/subscriber/src/benchmarking.rs b/cumulus/pallets/subscriber/src/benchmarking.rs index 419dbc2fbdf45..ca0b7c4b98b4a 100644 --- a/cumulus/pallets/subscriber/src/benchmarking.rs +++ b/cumulus/pallets/subscriber/src/benchmarking.rs @@ -24,7 +24,6 @@ use cumulus_primitives_core::ParaId; use frame_benchmarking::v2::*; use frame_support::traits::Get; use frame_system::RawOrigin; -use sp_trie::StorageProof; /// Create test subscriptions 
for benchmarking. fn create_subscriptions(n: u32, keys_per_publisher: u32) -> Vec<(ParaId, Vec>)> { @@ -41,18 +40,9 @@ fn create_subscriptions(n: u32, keys_per_publisher: u32) -> Vec<(ParaId, Vec RelayChainStateProof { - use sp_runtime::traits::BlakeTwo256; - use sp_trie::{empty_trie_root, LayoutV1}; - - let proof = StorageProof::empty(); - let root = empty_trie_root::>(); - RelayChainStateProof::new(ParaId::from(100), root.into(), proof).expect("valid proof") +/// Create a relay chain state proof for benchmarking with actual child trie data. +fn benchmark_relay_proof(publishers: &[(ParaId, Vec<(Vec, Vec)>)]) -> RelayChainStateProof { + crate::test_util::bench_proof_builder::build_sproof_with_child_data(publishers) } #[benchmarks] @@ -67,18 +57,16 @@ mod benchmarks { n: Linear<1, { T::MaxPublishers::get() }>, ) { let subscriptions = create_subscriptions(n, 1); - let _publishers: Vec<_> = (0..n) + let publishers: Vec<_> = (0..n) .map(|i| (ParaId::from(1000 + i), vec![(vec![i as u8], vec![25u8])])) .collect(); - // TODO: Use _publishers data to build proof once we have values - let proof = benchmark_relay_proof(); + let proof = benchmark_relay_proof(&publishers); let roots; #[block] { roots = Subscriber::::collect_publisher_roots(&proof, &subscriptions); } - // TODO: Update assertion once proof contains actual data - //assert!(roots.len() <= n as usize); + assert_eq!(roots.len(), n as usize); } /// Benchmark processing published data from the relay proof. 
@@ -98,7 +86,7 @@ mod benchmarks { let subscriptions = create_subscriptions(n, k); // SCALE encoding overhead (1-4 bytes) ignored as negligible compared to data benchmark ranges let value_size_per_key = (s / k.max(1)) as usize; - let _publishers: Vec<_> = (0..n) + let publishers: Vec<_> = (0..n) .map(|i| { let para_id = ParaId::from(1000 + i); let child_data: Vec<(Vec, Vec)> = (0..k) @@ -111,16 +99,14 @@ mod benchmarks { (para_id, child_data) }) .collect(); - // TODO: Use _publishers data to build proof once we have values - let proof = benchmark_relay_proof(); + let proof = benchmark_relay_proof(&publishers); let current_roots = Subscriber::::collect_publisher_roots(&proof, &subscriptions); #[block] { let _ = Subscriber::::process_published_data(&proof, ¤t_roots, &subscriptions); } - // TODO: Update assertion once proof contains actual data - //assert!(PreviousPublishedDataRoots::::get().len() <= n as usize); + assert_eq!(PreviousPublishedDataRoots::::get().len(), n as usize); } #[benchmark] diff --git a/cumulus/pallets/subscriber/src/lib.rs b/cumulus/pallets/subscriber/src/lib.rs index 67b6997df7bdc..80984ae2c7f92 100644 --- a/cumulus/pallets/subscriber/src/lib.rs +++ b/cumulus/pallets/subscriber/src/lib.rs @@ -42,7 +42,7 @@ pub use pallet::*; #[cfg(feature = "runtime-benchmarks")] mod benchmarking; -#[cfg(test)] +#[cfg(any(test, feature = "runtime-benchmarks"))] mod test_util; #[cfg(test)] mod mock; diff --git a/cumulus/pallets/subscriber/src/test_util.rs b/cumulus/pallets/subscriber/src/test_util.rs index 7f7f7259bb0a2..e1b4a8efdd3a0 100644 --- a/cumulus/pallets/subscriber/src/test_util.rs +++ b/cumulus/pallets/subscriber/src/test_util.rs @@ -1,62 +1,161 @@ // Copyright (C) Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 -#![cfg(test)] - use codec::Encode; -use cumulus_pallet_parachain_system::RelayChainStateProof; use cumulus_primitives_core::ParaId; -use sp_runtime::StateVersion; -use sp_state_machine::{Backend, TrieBackendBuilder}; -use sp_trie::{PrefixedMemoryDB, StorageProof}; - -/// Build a relay chain state proof with child trie data for multiple publishers. -pub fn build_sproof_with_child_data( - publishers: &[(ParaId, Vec<(Vec, Vec)>)], -) -> RelayChainStateProof { - use sp_runtime::traits::HashingFor; - - let (db, root) = PrefixedMemoryDB::>::default_with_root(); - let state_version = StateVersion::default(); - let mut backend = TrieBackendBuilder::new(db, root).build(); - - let mut all_proofs = vec![]; - let mut main_trie_updates = vec![]; - - // Process each publisher - for (publisher_para_id, child_data) in publishers { - let child_info = sp_core::storage::ChildInfo::new_default(&(b"pubsub", *publisher_para_id).encode()); - - // Insert child trie data - let child_kv: Vec<_> = child_data.iter().map(|(k, v)| (k.clone(), Some(v.clone()))).collect(); - backend.insert(vec![(Some(child_info.clone()), child_kv)], state_version); - - // Get child trie root and prepare to insert it in main trie - let child_root = backend.child_storage_root(&child_info, core::iter::empty(), state_version).0; - let prefixed_key = child_info.prefixed_storage_key(); - main_trie_updates.push((prefixed_key.to_vec(), Some(child_root.encode()))); - - // Prove child trie keys - let child_keys: Vec<_> = child_data.iter().map(|(k, _)| k.clone()).collect(); - if !child_keys.is_empty() { - let child_proof = sp_state_machine::prove_child_read_on_trie_backend(&backend, &child_info, child_keys) - .expect("prove child read"); - all_proofs.push(child_proof); + +#[cfg(test)] +mod std_proof_builder { + use super::*; + use cumulus_pallet_parachain_system::RelayChainStateProof; + use sp_runtime::StateVersion; + use sp_state_machine::{Backend, TrieBackendBuilder}; + use 
sp_trie::{PrefixedMemoryDB, StorageProof}; + + /// Build a relay chain state proof with child trie data for multiple publishers. + pub fn build_sproof_with_child_data( + publishers: &[(ParaId, Vec<(Vec, Vec)>)], + ) -> RelayChainStateProof { + use sp_runtime::traits::HashingFor; + + let (db, root) = PrefixedMemoryDB::>::default_with_root(); + let state_version = StateVersion::default(); + let mut backend = TrieBackendBuilder::new(db, root).build(); + + let mut all_proofs = vec![]; + let mut main_trie_updates = vec![]; + + // Process each publisher + for (publisher_para_id, child_data) in publishers { + let child_info = sp_core::storage::ChildInfo::new_default(&(b"pubsub", *publisher_para_id).encode()); + + // Insert child trie data + let child_kv: Vec<_> = child_data.iter().map(|(k, v)| (k.clone(), Some(v.clone()))).collect(); + backend.insert(vec![(Some(child_info.clone()), child_kv)], state_version); + + // Get child trie root and prepare to insert it in main trie + let child_root = backend.child_storage_root(&child_info, core::iter::empty(), state_version).0; + let prefixed_key = child_info.prefixed_storage_key(); + main_trie_updates.push((prefixed_key.to_vec(), Some(child_root.encode()))); + + // Prove child trie keys + let child_keys: Vec<_> = child_data.iter().map(|(k, _)| k.clone()).collect(); + if !child_keys.is_empty() { + let child_proof = sp_state_machine::prove_child_read_on_trie_backend(&backend, &child_info, child_keys) + .expect("prove child read"); + all_proofs.push(child_proof); + } } + + // Insert all child roots in main trie + backend.insert(vec![(None, main_trie_updates.clone())], state_version); + let root = *backend.root(); + + // Prove all child roots in main trie + let main_keys: Vec<_> = main_trie_updates.iter().map(|(k, _)| k.clone()).collect(); + let main_proof = sp_state_machine::prove_read_on_trie_backend(&backend, main_keys) + .expect("prove read"); + all_proofs.push(main_proof); + + // Merge all proofs + let proof = 
StorageProof::merge(all_proofs); + + RelayChainStateProof::new(ParaId::from(100), root, proof).expect("valid proof") } +} - // Insert all child roots in main trie - backend.insert(vec![(None, main_trie_updates.clone())], state_version); - let root = *backend.root(); +#[cfg(test)] +pub use std_proof_builder::build_sproof_with_child_data; - // Prove all child roots in main trie - let main_keys: Vec<_> = main_trie_updates.iter().map(|(k, _)| k.clone()).collect(); - let main_proof = sp_state_machine::prove_read_on_trie_backend(&backend, main_keys) - .expect("prove read"); - all_proofs.push(main_proof); +/// no_std-compatible proof builder for benchmarks +#[cfg(feature = "runtime-benchmarks")] +pub mod bench_proof_builder { + use super::*; + use alloc::vec::Vec; + use cumulus_pallet_parachain_system::RelayChainStateProof; + use sp_runtime::traits::BlakeTwo256; + use sp_trie::{trie_types::TrieDBMutBuilderV1, recorder_ext::RecorderExt, LayoutV1, MemoryDB, Recorder, StorageProof, TrieDBBuilder, TrieMut}; + use trie_db::Trie; - // Merge all proofs - let proof = StorageProof::merge(all_proofs); + /// Record all trie keys + fn record_all_trie_keys( + db: &DB, + root: &sp_trie::TrieHash, + ) -> Result>, sp_std::boxed::Box>> + where + DB: hash_db::HashDBRef, + { + let mut recorder = Recorder::::new(); + let trie = TrieDBBuilder::::new(db, root).with_recorder(&mut recorder).build(); + for x in trie.iter()? 
{ + let (key, _) = x?; + trie.get(&key)?; + } + Ok(recorder.into_raw_storage_proof()) + } - RelayChainStateProof::new(ParaId::from(100), root, proof).expect("valid proof") + /// Build relay chain state proof w/ child trie data + pub fn build_sproof_with_child_data( + publishers: &[(ParaId, Vec<(Vec, Vec)>)], + ) -> RelayChainStateProof { + use polkadot_primitives::Hash as RelayHash; + use sp_trie::empty_trie_root; + + // Build child tries and collect roots + let mut child_roots = alloc::vec::Vec::new(); + let mut all_nodes = alloc::vec::Vec::new(); + + for (publisher_para_id, child_data) in publishers { + use hash_db::{HashDB, EMPTY_PREFIX}; + + let empty_root = empty_trie_root::>(); + let mut child_root = empty_root; + let mut child_mdb = MemoryDB::::new(&[]); + // Insert empty trie node so TrieDBMut can find it + child_mdb.insert(EMPTY_PREFIX, &[0u8]); + + { + let mut child_trie = TrieDBMutBuilderV1::::new(&mut child_mdb, &mut child_root).build(); + for (key, value) in child_data { + child_trie.insert(key, value).expect("insert in bench"); + } + } + + // Collect child trie nodes + let child_nodes = record_all_trie_keys::, _>(&child_mdb, &child_root) + .expect("record child trie"); + all_nodes.extend(child_nodes); + + // Store child root for main trie + let child_info = sp_core::storage::ChildInfo::new_default(&(b"pubsub", *publisher_para_id).encode()); + let prefixed_key = child_info.prefixed_storage_key(); + child_roots.push((prefixed_key.to_vec(), child_root.encode())); + } + + // Build main trie w/ child roots + use hash_db::{HashDB, EMPTY_PREFIX}; + + let empty_root = empty_trie_root::>(); + let mut main_root = empty_root; + let mut main_mdb = MemoryDB::::new(&[]); + // Insert empty trie node so TrieDBMut can find it + main_mdb.insert(EMPTY_PREFIX, &[0u8]); + + { + let mut main_trie = TrieDBMutBuilderV1::::new(&mut main_mdb, &mut main_root).build(); + for (key, value) in &child_roots { + main_trie.insert(key, value).expect("insert in bench"); + } + } + + // 
Collect main trie nodes + let main_nodes = record_all_trie_keys::, _>(&main_mdb, &main_root) + .expect("record main trie"); + all_nodes.extend(main_nodes); + + let proof = StorageProof::new(all_nodes); + let root: RelayHash = main_root.into(); + + RelayChainStateProof::new(ParaId::from(100), root, proof).expect("valid proof") + } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml index 23bc88a16ccf0..e1dac1c813df6 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml @@ -114,6 +114,7 @@ assets-common = { workspace = true } cumulus-pallet-aura-ext = { workspace = true } cumulus-pallet-parachain-system = { workspace = true } cumulus-pallet-session-benchmarking = { workspace = true } +cumulus-pallet-subscriber = { workspace = true } cumulus-pallet-weight-reclaim = { workspace = true } cumulus-pallet-xcm = { workspace = true } cumulus-pallet-xcmp-queue = { features = ["bridging"], workspace = true } @@ -152,6 +153,7 @@ runtime-benchmarks = [ "assets-common/runtime-benchmarks", "cumulus-pallet-parachain-system/runtime-benchmarks", "cumulus-pallet-session-benchmarking/runtime-benchmarks", + "cumulus-pallet-subscriber/runtime-benchmarks", "cumulus-pallet-weight-reclaim/runtime-benchmarks", "cumulus-pallet-xcmp-queue/runtime-benchmarks", "cumulus-primitives-core/runtime-benchmarks", @@ -225,6 +227,7 @@ try-runtime = [ "assets-common/try-runtime", "cumulus-pallet-aura-ext/try-runtime", "cumulus-pallet-parachain-system/try-runtime", + "cumulus-pallet-subscriber/try-runtime", "cumulus-pallet-weight-reclaim/try-runtime", "cumulus-pallet-xcm/try-runtime", "cumulus-pallet-xcmp-queue/try-runtime", @@ -298,6 +301,7 @@ std = [ "cumulus-pallet-aura-ext/std", "cumulus-pallet-parachain-system/std", "cumulus-pallet-session-benchmarking/std", + "cumulus-pallet-subscriber/std", 
"cumulus-pallet-weight-reclaim/std", "cumulus-pallet-xcm/std", "cumulus-pallet-xcmp-queue/std", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index f0cb527c42f09..38cbcebfc42a3 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -915,6 +915,26 @@ type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< impl parachain_info::Config for Runtime {} +parameter_types! { + pub const MaxPublishers: u32 = 100; +} + +pub struct NoOpSubscriptionHandler; +impl cumulus_pallet_subscriber::SubscriptionHandler for NoOpSubscriptionHandler { + fn subscriptions() -> (Vec<(ParaId, Vec>)>, Weight) { + (vec![], Weight::zero()) + } + fn on_data_updated(_publisher: ParaId, _key: Vec, _value: Vec) -> Weight { + Weight::zero() + } +} + +impl cumulus_pallet_subscriber::Config for Runtime { + type SubscriptionHandler = NoOpSubscriptionHandler; + type WeightInfo = weights::cumulus_pallet_subscriber::WeightInfo; + type MaxPublishers = MaxPublishers; +} + parameter_types! 
{ pub MessageQueueServiceWeight: Weight = Perbill::from_percent(35) * RuntimeBlockWeights::get().max_block; } @@ -1324,6 +1344,7 @@ construct_runtime!( ParachainInfo: parachain_info = 4, WeightReclaim: cumulus_pallet_weight_reclaim = 5, MultiBlockMigrations: pallet_migrations = 6, + Subscriber: cumulus_pallet_subscriber = 255, Preimage: pallet_preimage = 7, Scheduler: pallet_scheduler = 8, Sudo: pallet_sudo = 9, @@ -1714,6 +1735,7 @@ mod benches { [pallet_transaction_payment, TransactionPayment] [pallet_collator_selection, CollatorSelection] [cumulus_pallet_parachain_system, ParachainSystem] + [cumulus_pallet_subscriber, Subscriber] [cumulus_pallet_xcmp_queue, XcmpQueue] [pallet_treasury, Treasury] [pallet_vesting, Vesting] diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_subscriber.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_subscriber.rs new file mode 100644 index 0000000000000..42b566b219251 --- /dev/null +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_subscriber.rs @@ -0,0 +1,90 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `cumulus_pallet_subscriber` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! 
DATE: 2025-12-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `192.168.1.2`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --chain=asset-hub-westend-dev +// --wasm-execution=compiled +// --pallet=cumulus_pallet_subscriber +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --output=./cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_subscriber.rs +// --header=./cumulus/file_header.txt + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `cumulus_pallet_subscriber`. +pub struct WeightInfo(PhantomData); +impl cumulus_pallet_subscriber::WeightInfo for WeightInfo { + /// The range of component `n` is `[1, 100]`. + fn collect_publisher_roots(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_000_000 picoseconds. + Weight::from_parts(1_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 2_289 + .saturating_add(Weight::from_parts(1_853_718, 0).saturating_mul(n.into())) + } + /// Storage: `Subscriber::PreviousPublishedDataRoots` (r:1 w:1) + /// Proof: `Subscriber::PreviousPublishedDataRoots` (`max_values`: Some(1), `max_size`: Some(3702), added: 4197, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 100]`. + /// The range of component `k` is `[1, 10]`. + /// The range of component `s` is `[1, 2048]`. + fn process_published_data(n: u32, k: u32, _s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `142` + // Estimated: `5187` + // Minimum execution time: 51_000_000 picoseconds. 
+ Weight::from_parts(51_000_000, 0) + .saturating_add(Weight::from_parts(0, 5187)) + // Standard Error: 448_042 + .saturating_add(Weight::from_parts(33_087_314, 0).saturating_mul(n.into())) + // Standard Error: 4_535_424 + .saturating_add(Weight::from_parts(311_706_924, 0).saturating_mul(k.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Subscriber::PreviousPublishedDataRoots` (r:1 w:1) + /// Proof: `Subscriber::PreviousPublishedDataRoots` (`max_values`: Some(1), `max_size`: Some(3702), added: 4197, mode: `MaxEncodedLen`) + fn clear_stored_roots() -> Weight { + // Proof Size summary in bytes: + // Measured: `206` + // Estimated: `5187` + // Minimum execution time: 8_000_000 picoseconds. + Weight::from_parts(9_000_000, 0) + .saturating_add(Weight::from_parts(0, 5187)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs index c8e94609da202..2df84997cfbd0 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs @@ -15,6 +15,7 @@ pub mod block_weights; pub mod cumulus_pallet_parachain_system; +pub mod cumulus_pallet_subscriber; pub mod cumulus_pallet_weight_reclaim; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; From 0f0081253042550fe3a151e909ed2dca5707b56e Mon Sep 17 00:00:00 2001 From: metricaez Date: Tue, 16 Dec 2025 09:18:35 -0300 Subject: [PATCH 52/69] feat: weights and benchmark details --- cumulus/pallets/subscriber/src/lib.rs | 71 ++++++- .../src/broadcaster/benchmarking.rs | 14 +- polkadot/runtime/parachains/src/mock.rs | 4 +- polkadot/runtime/rococo/src/lib.rs | 45 +++- polkadot/runtime/rococo/src/weights/mod.rs | 1 + 
...polkadot_runtime_parachains_broadcaster.rs | 197 ++++++++++++++++++ .../runtime/rococo/src/weights/xcm/mod.rs | 5 +- .../xcm/pallet_xcm_benchmarks_generic.rs | 23 ++ .../src/generic/benchmarking.rs | 17 +- .../pallet-xcm-benchmarks/src/generic/mock.rs | 5 + .../pallet-xcm-benchmarks/src/generic/mod.rs | 9 + polkadot/xcm/src/v5/mod.rs | 2 +- 12 files changed, 368 insertions(+), 25 deletions(-) create mode 100644 polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_broadcaster.rs diff --git a/cumulus/pallets/subscriber/src/lib.rs b/cumulus/pallets/subscriber/src/lib.rs index 80984ae2c7f92..f0bfdd723e817 100644 --- a/cumulus/pallets/subscriber/src/lib.rs +++ b/cumulus/pallets/subscriber/src/lib.rs @@ -317,20 +317,73 @@ pub trait WeightInfo { } } +/// Weights for cumulus_pallet_subscriber using the Substrate node and recommended hardware. +pub struct SubstrateWeight(core::marker::PhantomData); +impl WeightInfo for SubstrateWeight { + /// The range of component `n` is `[1, 100]`. + fn collect_publisher_roots(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_000_000 picoseconds. + Weight::from_parts(1_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 2_289 + .saturating_add(Weight::from_parts(1_853_718, 0).saturating_mul(n.into())) + } + /// Storage: `Subscriber::PreviousPublishedDataRoots` (r:1 w:1) + /// Proof: `Subscriber::PreviousPublishedDataRoots` (`max_values`: Some(1), `max_size`: Some(3702), added: 4197, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 100]`. + /// The range of component `k` is `[1, 10]`. + /// The range of component `s` is `[1, 2048]`. + fn process_published_data(n: u32, k: u32, _s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `142` + // Estimated: `5187` + // Minimum execution time: 51_000_000 picoseconds. 
+ Weight::from_parts(51_000_000, 0) + .saturating_add(Weight::from_parts(0, 5187)) + // Standard Error: 448_042 + .saturating_add(Weight::from_parts(33_087_314, 0).saturating_mul(n.into())) + // Standard Error: 4_535_424 + .saturating_add(Weight::from_parts(311_706_924, 0).saturating_mul(k.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Subscriber::PreviousPublishedDataRoots` (r:1 w:1) + /// Proof: `Subscriber::PreviousPublishedDataRoots` (`max_values`: Some(1), `max_size`: Some(3702), added: 4197, mode: `MaxEncodedLen`) + fn clear_stored_roots() -> Weight { + // Proof Size summary in bytes: + // Measured: `206` + // Estimated: `5187` + // Minimum execution time: 8_000_000 picoseconds. + Weight::from_parts(9_000_000, 0) + .saturating_add(Weight::from_parts(0, 5187)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} + impl WeightInfo for () { - fn collect_publisher_roots(_n: u32) -> Weight { - // TODO: Replace with proper benchmarked weights - Weight::from_parts(10_000, 0) + fn collect_publisher_roots(n: u32) -> Weight { + Weight::from_parts(1_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(Weight::from_parts(1_853_718, 0).saturating_mul(n.into())) } - fn process_published_data(_n: u32, _k: u32, _s: u32) -> Weight { - // TODO: Replace with proper benchmarked weights - // Note: Real benchmarks will add per-byte overhead for _s - Weight::from_parts(50_000, 0) + fn process_published_data(n: u32, k: u32, _s: u32) -> Weight { + Weight::from_parts(51_000_000, 0) + .saturating_add(Weight::from_parts(0, 5187)) + .saturating_add(Weight::from_parts(33_087_314, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(311_706_924, 0).saturating_mul(k.into())) + .saturating_add(Weight::from_parts(1_000_000, 0)) + .saturating_add(Weight::from_parts(1_000_000, 0)) } fn clear_stored_roots() -> Weight { - // TODO: Replace 
with proper benchmarked weights - Weight::from_parts(50_000, 0) + Weight::from_parts(9_000_000, 0) + .saturating_add(Weight::from_parts(0, 5187)) + .saturating_add(Weight::from_parts(1_000_000, 0)) + .saturating_add(Weight::from_parts(1_000_000, 0)) } } diff --git a/polkadot/runtime/parachains/src/broadcaster/benchmarking.rs b/polkadot/runtime/parachains/src/broadcaster/benchmarking.rs index 899f34f87864b..58831cce8515a 100644 --- a/polkadot/runtime/parachains/src/broadcaster/benchmarking.rs +++ b/polkadot/runtime/parachains/src/broadcaster/benchmarking.rs @@ -28,6 +28,7 @@ type BalanceOf = #[benchmarks] mod benchmarks { use super::*; + use alloc::vec; use frame_system::RawOrigin; #[benchmark] @@ -66,6 +67,17 @@ mod benchmarks { Broadcaster::::register_publisher(RawOrigin::Signed(caller).into(), para_id) .unwrap(); + // Calculate max value size to stay within MaxTotalStorageSize and MaxValueLength + // Total size = sum of (32 bytes key + value_len) for all keys + let max_total_size = T::MaxTotalStorageSize::get() as usize; + let max_value_length = T::MaxValueLength::get() as usize; + let key_size = 32usize; + let max_value_size = (max_total_size / k as usize) + .saturating_sub(key_size) + .min(max_value_length) + .max(1); + let value = vec![0u8; max_value_size]; + // Publish k keys in batches to respect MaxPublishItems limit let max_items = T::MaxPublishItems::get(); for batch_start in (0..k).step_by(max_items as usize) { @@ -75,7 +87,7 @@ mod benchmarks { let mut key_data = b"key_".to_vec(); key_data.extend_from_slice(&i.to_be_bytes()); let key = blake2_256(&key_data); - data.push((key, b"value".to_vec())); + data.push((key, value.clone())); } Broadcaster::::handle_publish(para_id, data).unwrap(); } diff --git a/polkadot/runtime/parachains/src/mock.rs b/polkadot/runtime/parachains/src/mock.rs index 217d7dd3632ed..cc65d33b7802e 100644 --- a/polkadot/runtime/parachains/src/mock.rs +++ b/polkadot/runtime/parachains/src/mock.rs @@ -218,9 +218,9 @@ impl 
crate::shared::Config for Test { } parameter_types! { - pub const MaxPublishItems: u32 = 16; + pub const MaxPublishItems: u32 = 10; pub const MaxValueLength: u32 = 1024; - pub const MaxStoredKeys: u32 = 50; + pub const MaxStoredKeys: u32 = 50; pub const MaxTotalStorageSize: u32 = 2048; // 2 KiB pub const MaxPublishers: u32 = 1000; } diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 38b1f6cefe6b9..25579a5a903ce 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -1229,7 +1229,7 @@ impl parachains_slashing::Config for Runtime { parameter_types! { pub const MaxPublishItems: u32 = 10; pub const MaxValueLength: u32 = 1024; - pub const MaxStoredKeys: u32 = 100; + pub const MaxStoredKeys: u32 = 50; pub const MaxTotalStorageSize: u32 = 2048; // 2 KiB pub const MaxPublishers: u32 = 1000; pub const PublisherDeposit: Balance = 100 * UNITS; @@ -1238,7 +1238,7 @@ parameter_types! { impl parachains_broadcaster::Config for Runtime { type Currency = Balances; type RuntimeHoldReason = RuntimeHoldReason; - type WeightInfo = (); + type WeightInfo = weights::polkadot_runtime_parachains_broadcaster::WeightInfo; type MaxPublishItems = MaxPublishItems; type MaxValueLength = MaxValueLength; type MaxStoredKeys = MaxStoredKeys; @@ -2682,6 +2682,47 @@ sp_api::impl_runtime_apis! 
{ Ok(AssetHub::get()) } + fn publish_origin() -> Result { + Ok(AssetHub::get()) + } + + fn ensure_publisher_registered(origin: &Location) -> Result<(), BenchmarkError> { + use frame_benchmarking::whitelisted_caller; + use frame_support::ensure; + + // Extract parachain ID from origin + let para_id = match origin.unpack() { + (0, [Junction::Parachain(id)]) => { + polkadot_primitives::Id::from(*id) + }, + (1, [Junction::Parachain(id), ..]) => { + polkadot_primitives::Id::from(*id) + }, + _ => { + return Err(BenchmarkError::Stop("Invalid origin for publisher registration")) + }, + }; + + // Force register the publisher with zero deposit for benchmarking + let manager: AccountId = whitelisted_caller(); + Broadcaster::force_register_publisher( + RuntimeOrigin::root(), + manager, + 0u128, + para_id, + ) + .map_err(|_| BenchmarkError::Stop("Failed to register publisher"))?; + + // Verify registration succeeded + let is_registered = parachains_broadcaster::RegisteredPublishers::::contains_key(para_id); + ensure!( + is_registered, + BenchmarkError::Stop("Publisher not registered after force_register") + ); + + Ok(()) + } + fn claimable_asset() -> Result<(Location, Location, Assets), BenchmarkError> { let origin = AssetHub::get(); let assets: Assets = (AssetId(TokenLocation::get()), 1_000 * UNITS).into(); diff --git a/polkadot/runtime/rococo/src/weights/mod.rs b/polkadot/runtime/rococo/src/weights/mod.rs index 1c030c444ac59..95afa287f2c5b 100644 --- a/polkadot/runtime/rococo/src/weights/mod.rs +++ b/polkadot/runtime/rococo/src/weights/mod.rs @@ -54,6 +54,7 @@ pub mod polkadot_runtime_common_crowdloan; pub mod polkadot_runtime_common_identity_migrator; pub mod polkadot_runtime_common_paras_registrar; pub mod polkadot_runtime_common_slots; +pub mod polkadot_runtime_parachains_broadcaster; pub mod polkadot_runtime_parachains_configuration; pub mod polkadot_runtime_parachains_coretime; pub mod polkadot_runtime_parachains_disputes; diff --git 
a/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_broadcaster.rs b/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_broadcaster.rs new file mode 100644 index 0000000000000..7c35d80517854 --- /dev/null +++ b/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_broadcaster.rs @@ -0,0 +1,197 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Autogenerated weights for `polkadot_runtime_parachains::broadcaster` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-12-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `192.168.1.2`, CPU: `` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot +// benchmark +// pallet +// --chain=rococo-dev +// --steps=50 +// --repeat=20 +// --pallet=polkadot_runtime_parachains::broadcaster +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_broadcaster.rs +// --header=./polkadot/file_header.txt + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `polkadot_runtime_parachains::broadcaster`. +pub struct WeightInfo(PhantomData); +impl polkadot_runtime_parachains::broadcaster::WeightInfo for WeightInfo { + /// Storage: `Broadcaster::RegisteredPublishers` (r:2 w:1) + /// Proof: `Broadcaster::RegisteredPublishers` (`max_values`: None, `max_size`: Some(60), added: 2535, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `MaxEncodedLen`) + fn register_publisher() -> Weight { + // Proof Size summary in bytes: + // Measured: `272` + // Estimated: `6060` + // Minimum execution time: 49_000_000 picoseconds. + Weight::from_parts(50_000_000, 0) + .saturating_add(Weight::from_parts(0, 6060)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Broadcaster::RegisteredPublishers` (r:2 w:1) + /// Proof: `Broadcaster::RegisteredPublishers` (`max_values`: None, `max_size`: Some(60), added: 2535, mode: `MaxEncodedLen`) + fn force_register_publisher() -> Weight { + // Proof Size summary in bytes: + // Measured: `272` + // Estimated: `6060` + // Minimum execution time: 12_000_000 picoseconds. 
+ Weight::from_parts(14_000_000, 0) + .saturating_add(Weight::from_parts(0, 6060)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Broadcaster::PublishedKeys` (r:1 w:1) + /// Proof: `Broadcaster::PublishedKeys` (`max_values`: None, `max_size`: Some(1613), added: 4088, mode: `MaxEncodedLen`) + /// Storage: `Broadcaster::PublisherExists` (r:0 w:1) + /// Proof: `Broadcaster::PublisherExists` (`max_values`: None, `max_size`: Some(13), added: 2488, mode: `MaxEncodedLen`) + /// Storage: `Broadcaster::TotalStorageSize` (r:0 w:1) + /// Proof: `Broadcaster::TotalStorageSize` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x15ac8beec98bc93b831fe0c1c3a4973a81a08a41496dab0f714e0b1a36f6907c` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x15ac8beec98bc93b831fe0c1c3a4973a81a08a41496dab0f714e0b1a36f6907c` (r:0 w:1) + /// Storage: UNKNOWN KEY `0x16af40e99eac4ec67683e7084135c6d39fb8c295b6588ed52dcfedd44a5d4edf` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x16af40e99eac4ec67683e7084135c6d39fb8c295b6588ed52dcfedd44a5d4edf` (r:0 w:1) + /// Storage: UNKNOWN KEY `0x16be8f420223354f5ff3201e57c3b8628b3e5b3e209dfcaa89ece4a8e5c68b1d` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x16be8f420223354f5ff3201e57c3b8628b3e5b3e209dfcaa89ece4a8e5c68b1d` (r:0 w:1) + /// Storage: UNKNOWN KEY `0x28b4fdcd9d46acf522102ea5adf42c1a626443b1afef620ef524c61784fc2383` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x28b4fdcd9d46acf522102ea5adf42c1a626443b1afef620ef524c61784fc2383` (r:0 w:1) + /// Storage: UNKNOWN KEY `0x2f1ce9367a8a7f59d87e877861c262dd7b1ca1e976282b28a3d6d7f63282304e` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x2f1ce9367a8a7f59d87e877861c262dd7b1ca1e976282b28a3d6d7f63282304e` (r:0 w:1) + /// Storage: UNKNOWN KEY `0x2fc3acc91091d2a0f88f79a00144305f85993b39b3ce676da78b6ec1e99cee9d` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x2fc3acc91091d2a0f88f79a00144305f85993b39b3ce676da78b6ec1e99cee9d` (r:0 w:1) + /// Storage: 
UNKNOWN KEY `0x4253a98a38b2917c26e0dcd9f1f3591563c586db07c30c4288a1329f03d5f345` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x4253a98a38b2917c26e0dcd9f1f3591563c586db07c30c4288a1329f03d5f345` (r:0 w:1) + /// Storage: UNKNOWN KEY `0x4a2e603f4ccddc5063e87b25384bc25b8e45c6f0d536292a928ff838e3dfe777` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x4a2e603f4ccddc5063e87b25384bc25b8e45c6f0d536292a928ff838e3dfe777` (r:0 w:1) + /// Storage: UNKNOWN KEY `0x52f26ad87bbab43295b479cda10e4e7e5856670d8110827b328ddffe176d50f9` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x52f26ad87bbab43295b479cda10e4e7e5856670d8110827b328ddffe176d50f9` (r:0 w:1) + /// Storage: UNKNOWN KEY `0x542b58fa02b321fe24d779d0c9487211e8703bb5137aafcfd90293bfd9f62686` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x542b58fa02b321fe24d779d0c9487211e8703bb5137aafcfd90293bfd9f62686` (r:0 w:1) + /// Storage: UNKNOWN KEY `0x56b3cd6cecdf8d5c1bcc82536c0f2a84960aa569dfe456df8b7c6a84a649f5f2` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x56b3cd6cecdf8d5c1bcc82536c0f2a84960aa569dfe456df8b7c6a84a649f5f2` (r:0 w:1) + /// Storage: UNKNOWN KEY `0x5d6c339abbb79e03a57a8ed9dc802d9c6e6bf5a7aca1b846090e13282d8d7ef9` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x5d6c339abbb79e03a57a8ed9dc802d9c6e6bf5a7aca1b846090e13282d8d7ef9` (r:0 w:1) + /// Storage: UNKNOWN KEY `0x5e30b2f53e107c8c9a7b794d5b2ec87170b9ce607738911d525166c5c6ce9d3f` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x5e30b2f53e107c8c9a7b794d5b2ec87170b9ce607738911d525166c5c6ce9d3f` (r:0 w:1) + /// Storage: UNKNOWN KEY `0x5e494c16c55fc5d253faf1f5c631f7629df68cbb892e2d2275d2ba0973251b33` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x5e494c16c55fc5d253faf1f5c631f7629df68cbb892e2d2275d2ba0973251b33` (r:0 w:1) + /// Storage: UNKNOWN KEY `0x65706813667bdea8995a6b3eee946258218c0dee5b61a109f67e6d0bdaeeb0c1` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x65706813667bdea8995a6b3eee946258218c0dee5b61a109f67e6d0bdaeeb0c1` (r:0 w:1) + /// Storage: UNKNOWN KEY `0x65a4b2252574fa1faf8c982779faf7c433d71f8d0724aac8a9791ed68757508b` (r:0 w:1) + /// Proof: 
UNKNOWN KEY `0x65a4b2252574fa1faf8c982779faf7c433d71f8d0724aac8a9791ed68757508b` (r:0 w:1) + /// Storage: UNKNOWN KEY `0x68d11757b0b819e8392e9cf5a0dcc019dab990c92b5884070bbbccd46ac35366` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x68d11757b0b819e8392e9cf5a0dcc019dab990c92b5884070bbbccd46ac35366` (r:0 w:1) + /// Storage: UNKNOWN KEY `0x6ac06fa426d3ee7e1e1492d1e8bac20730a25585662c28ee57327a6823c99461` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x6ac06fa426d3ee7e1e1492d1e8bac20730a25585662c28ee57327a6823c99461` (r:0 w:1) + /// Storage: UNKNOWN KEY `0x788de17a74d77a4e414bcdcec6475be3a623c24d4bf4016c92005ffe5cbdbd06` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x788de17a74d77a4e414bcdcec6475be3a623c24d4bf4016c92005ffe5cbdbd06` (r:0 w:1) + /// Storage: UNKNOWN KEY `0x7b6359e6345f9579d0d7bfb87cf815eb748b10dfec77f04d830240a3c377f7aa` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x7b6359e6345f9579d0d7bfb87cf815eb748b10dfec77f04d830240a3c377f7aa` (r:0 w:1) + /// Storage: UNKNOWN KEY `0x7f8988f65ef4e9f910fad3021858e5f4db43fbb72c883fbafe0dc8a7072e7524` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x7f8988f65ef4e9f910fad3021858e5f4db43fbb72c883fbafe0dc8a7072e7524` (r:0 w:1) + /// Storage: UNKNOWN KEY `0x8238f47d03bc01514154ca3909abc8b9633cdef12cc9951abaebffe843c14041` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x8238f47d03bc01514154ca3909abc8b9633cdef12cc9951abaebffe843c14041` (r:0 w:1) + /// Storage: UNKNOWN KEY `0x86287325a910c1c9fa03e562b552b24254a8db6330b9d66e878999dd50b577d6` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x86287325a910c1c9fa03e562b552b24254a8db6330b9d66e878999dd50b577d6` (r:0 w:1) + /// Storage: UNKNOWN KEY `0x886a678a6e522f119673a545e9077525fba6e40fb298a78973e2fe5b2d60bb24` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x886a678a6e522f119673a545e9077525fba6e40fb298a78973e2fe5b2d60bb24` (r:0 w:1) + /// Storage: UNKNOWN KEY `0x896289bf1050ec1f4586e230f9aa9501b477b22cc34e67a1374e04751a641b64` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x896289bf1050ec1f4586e230f9aa9501b477b22cc34e67a1374e04751a641b64` (r:0 w:1) + /// Storage: 
UNKNOWN KEY `0x8bcadc2a27b91cb041e0ae13445664ec836a5c5b2e8e6d59ef44e1a4d70ef858` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x8bcadc2a27b91cb041e0ae13445664ec836a5c5b2e8e6d59ef44e1a4d70ef858` (r:0 w:1) + /// Storage: UNKNOWN KEY `0x8c9237de5d36d7d3c5e5e4d3b5f4d090017b2dadc24e16ea29f89e1c3697e6cf` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x8c9237de5d36d7d3c5e5e4d3b5f4d090017b2dadc24e16ea29f89e1c3697e6cf` (r:0 w:1) + /// Storage: UNKNOWN KEY `0x8e30e4682feee1bfae686b3a4036cc27f800f452988af57a5d3e59fc1beb78b8` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x8e30e4682feee1bfae686b3a4036cc27f800f452988af57a5d3e59fc1beb78b8` (r:0 w:1) + /// Storage: UNKNOWN KEY `0x8e6c825b6355a8ae7527e0eb42fd256008357f2c185156b417505364fde91f4c` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x8e6c825b6355a8ae7527e0eb42fd256008357f2c185156b417505364fde91f4c` (r:0 w:1) + /// Storage: UNKNOWN KEY `0x94e4f31083caa66ea61002606c7ced3db33ab20d2b4a17663eb336fb18322fb8` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x94e4f31083caa66ea61002606c7ced3db33ab20d2b4a17663eb336fb18322fb8` (r:0 w:1) + /// Storage: UNKNOWN KEY `0xab1f0c77c2ab2d59818acfffa4b9bd82978c00f921f6df4cdf9e95817b3cb82a` (r:0 w:1) + /// Proof: UNKNOWN KEY `0xab1f0c77c2ab2d59818acfffa4b9bd82978c00f921f6df4cdf9e95817b3cb82a` (r:0 w:1) + /// Storage: UNKNOWN KEY `0xac8e27d5534680471ad48b9dbefc509f6dca2f65b29b45279cc651e7d29d93af` (r:0 w:1) + /// Proof: UNKNOWN KEY `0xac8e27d5534680471ad48b9dbefc509f6dca2f65b29b45279cc651e7d29d93af` (r:0 w:1) + /// Storage: UNKNOWN KEY `0xae02f663b6f751aff9ab13560e3b32bde2bf2d04cdd7a252cd29603b34cf67f3` (r:0 w:1) + /// Proof: UNKNOWN KEY `0xae02f663b6f751aff9ab13560e3b32bde2bf2d04cdd7a252cd29603b34cf67f3` (r:0 w:1) + /// Storage: UNKNOWN KEY `0xaeb83a412cf2a74cef832fdec6d973f4fe6eed123bd47f49a3d602262865059b` (r:0 w:1) + /// Proof: UNKNOWN KEY `0xaeb83a412cf2a74cef832fdec6d973f4fe6eed123bd47f49a3d602262865059b` (r:0 w:1) + /// Storage: UNKNOWN KEY `0xb599461299f78aa5ea11de079cceaa320fa794f8388c5a4ffb169edbb3b3836b` (r:0 w:1) + /// Proof: 
UNKNOWN KEY `0xb599461299f78aa5ea11de079cceaa320fa794f8388c5a4ffb169edbb3b3836b` (r:0 w:1) + /// Storage: UNKNOWN KEY `0xb8e5f31d2d2eab325f2dbee34a6749e013c1d2f1a0020b4c68e7eb3e1c720abc` (r:0 w:1) + /// Proof: UNKNOWN KEY `0xb8e5f31d2d2eab325f2dbee34a6749e013c1d2f1a0020b4c68e7eb3e1c720abc` (r:0 w:1) + /// Storage: UNKNOWN KEY `0xc05903007f6c34f907793c2370e5193d863c7c21c477a9a87e991d7501be6517` (r:0 w:1) + /// Proof: UNKNOWN KEY `0xc05903007f6c34f907793c2370e5193d863c7c21c477a9a87e991d7501be6517` (r:0 w:1) + /// Storage: UNKNOWN KEY `0xca7eae6d26208ecc01bdc6394f588da72afdb8ce73c251fd24f5b19f9aa83cfc` (r:0 w:1) + /// Proof: UNKNOWN KEY `0xca7eae6d26208ecc01bdc6394f588da72afdb8ce73c251fd24f5b19f9aa83cfc` (r:0 w:1) + /// Storage: UNKNOWN KEY `0xcd5796c119221de2ec09358971ccc8d73f9b92f69af509813187c6b2cdc5c6ca` (r:0 w:1) + /// Proof: UNKNOWN KEY `0xcd5796c119221de2ec09358971ccc8d73f9b92f69af509813187c6b2cdc5c6ca` (r:0 w:1) + /// Storage: UNKNOWN KEY `0xd8f34156e481702964e8ade1063453c2c0920afff07e90264e06ed89f9f9631f` (r:0 w:1) + /// Proof: UNKNOWN KEY `0xd8f34156e481702964e8ade1063453c2c0920afff07e90264e06ed89f9f9631f` (r:0 w:1) + /// Storage: UNKNOWN KEY `0xdaf2aba47fc5c8f30858d1bda295f3513b184c285e00f542adbe181ceb2e088f` (r:0 w:1) + /// Proof: UNKNOWN KEY `0xdaf2aba47fc5c8f30858d1bda295f3513b184c285e00f542adbe181ceb2e088f` (r:0 w:1) + /// Storage: UNKNOWN KEY `0xdfeac712ddc4d22892e9753643ff8c8fb01efb37d6a8f4a2a2aa7e3143f80817` (r:0 w:1) + /// Proof: UNKNOWN KEY `0xdfeac712ddc4d22892e9753643ff8c8fb01efb37d6a8f4a2a2aa7e3143f80817` (r:0 w:1) + /// Storage: UNKNOWN KEY `0xe086e1cb55ef70ce910b1c7a6c9e9642ee1b2f3027ae8e4e3774dc8db6a3d2a4` (r:0 w:1) + /// Proof: UNKNOWN KEY `0xe086e1cb55ef70ce910b1c7a6c9e9642ee1b2f3027ae8e4e3774dc8db6a3d2a4` (r:0 w:1) + /// Storage: UNKNOWN KEY `0xe618b9e70ac91465a05b26bb320da486a297887d987053f704fb17293e7de326` (r:0 w:1) + /// Proof: UNKNOWN KEY `0xe618b9e70ac91465a05b26bb320da486a297887d987053f704fb17293e7de326` (r:0 w:1) + /// Storage: 
UNKNOWN KEY `0xe6f5ca06fed122aab7133197633b4e11b8c7a106e6160dfbc95981f8dc26cb57` (r:0 w:1) + /// Proof: UNKNOWN KEY `0xe6f5ca06fed122aab7133197633b4e11b8c7a106e6160dfbc95981f8dc26cb57` (r:0 w:1) + /// Storage: UNKNOWN KEY `0xf45418b273b9746266e047754cb524e91a24612f0ba86804f8271dd1b5f4ea0c` (r:0 w:1) + /// Proof: UNKNOWN KEY `0xf45418b273b9746266e047754cb524e91a24612f0ba86804f8271dd1b5f4ea0c` (r:0 w:1) + /// Storage: UNKNOWN KEY `0xf68346726d2bb257005ebe3126da510c53943008902a3b809ae75fdce5b9f5c1` (r:0 w:1) + /// Proof: UNKNOWN KEY `0xf68346726d2bb257005ebe3126da510c53943008902a3b809ae75fdce5b9f5c1` (r:0 w:1) + /// Storage: UNKNOWN KEY `0xf70e2a259d913084a49224aee6ba9c098c637944c566723f79379a9dc0498ff1` (r:0 w:1) + /// Proof: UNKNOWN KEY `0xf70e2a259d913084a49224aee6ba9c098c637944c566723f79379a9dc0498ff1` (r:0 w:1) + /// Storage: UNKNOWN KEY `0xfb1bc128f8eb8aece1e7f52f2b22b15b90a40f53c500f534aec49d19fff0b8e7` (r:0 w:1) + /// Proof: UNKNOWN KEY `0xfb1bc128f8eb8aece1e7f52f2b22b15b90a40f53c500f534aec49d19fff0b8e7` (r:0 w:1) + /// Storage: UNKNOWN KEY `0xfdb5e3874e72d9ca11b3b0875c92f5b48c71c916c0fe09387cae87e1450a8628` (r:0 w:1) + /// Proof: UNKNOWN KEY `0xfdb5e3874e72d9ca11b3b0875c92f5b48c71c916c0fe09387cae87e1450a8628` (r:0 w:1) + /// The range of component `k` is `[1, 50]`. + fn do_cleanup_publisher(k: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `356 + k * (32 ±0)` + // Estimated: `5078 + k * (39 ±0)` + // Minimum execution time: 10_000_000 picoseconds. 
+ Weight::from_parts(12_327_673, 0) + .saturating_add(Weight::from_parts(0, 5078)) + // Standard Error: 18_369 + .saturating_add(Weight::from_parts(360_091, 0).saturating_mul(k.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) + .saturating_add(Weight::from_parts(0, 39).saturating_mul(k.into())) + } +} diff --git a/polkadot/runtime/rococo/src/weights/xcm/mod.rs b/polkadot/runtime/rococo/src/weights/xcm/mod.rs index d99310f1b8240..31527c53ac640 100644 --- a/polkadot/runtime/rococo/src/weights/xcm/mod.rs +++ b/polkadot/runtime/rococo/src/weights/xcm/mod.rs @@ -306,10 +306,7 @@ impl XcmWeightInfo for RococoXcmWeight { XcmGeneric::::execute_with_origin() } fn publish(data: &PublishData) -> Weight { - // TODO: Generate proper weights via benchmarking - // For now, use a conservative estimate: base weight + per-item weight - Weight::from_parts(10_000_000, 0) - .saturating_add(Weight::from_parts(1_000_000, 0).saturating_mul(data.len() as u64)) + XcmGeneric::::publish(data.len() as u32) } } diff --git a/polkadot/runtime/rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/polkadot/runtime/rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index 4268ce5612f52..9d0380df809a2 100644 --- a/polkadot/runtime/rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/polkadot/runtime/rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -349,4 +349,27 @@ impl WeightInfo { // Minimum execution time: 766_000 picoseconds. 
Weight::from_parts(807_000, 0) } + /// Storage: `Broadcaster::RegisteredPublishers` (r:1 w:0) + /// Proof: `Broadcaster::RegisteredPublishers` (`max_values`: None, `max_size`: Some(60), added: 2535, mode: `MaxEncodedLen`) + /// Storage: `Broadcaster::PublishedKeys` (r:1 w:1) + /// Proof: `Broadcaster::PublishedKeys` (`max_values`: None, `max_size`: Some(1613), added: 4088, mode: `MaxEncodedLen`) + /// Storage: `Broadcaster::TotalStorageSize` (r:1 w:1) + /// Proof: `Broadcaster::TotalStorageSize` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Broadcaster::PublisherExists` (r:1 w:1) + /// Proof: `Broadcaster::PublisherExists` (`max_values`: None, `max_size`: Some(13), added: 2488, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 10]`. + pub(crate) fn publish(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `290` + // Estimated: `5078 + n * (372 ±0)` + // Minimum execution time: 33_000_000 picoseconds. + Weight::from_parts(55_703_232, 5078) + // Standard Error: 335_228 + .saturating_add(Weight::from_parts(5_762_702, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 372).saturating_mul(n.into())) + } } diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs index ec2c21bcff56e..577bfebe7b334 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs @@ -965,19 +965,22 @@ mod benchmarks { fn publish(n: Linear<1, { MaxPublishItems::get() }>) -> Result<(), BenchmarkError> { use xcm::latest::MaxPublishValueLength; - // The `Publish` instruction weight 
scales with the number of items published. - // Each item is benchmarked at maximum value length to represent worst-case - // storage operations. The actual weight formula will be `base_weight + n * per_item_weight`. let max_value_len = MaxPublishValueLength::get() as usize; - // Create publish data: n items, each with a unique hash key and maximum value length + // Calculate value size to fit within a conservative 2KB total storage budget + const KEY_SIZE: usize = 32; + let conservative_total_storage = 2048usize; + let value_size = ((conservative_total_storage / n.max(1) as usize).saturating_sub(KEY_SIZE)) + .min(max_value_len) + .max(1); + let data_vec: Vec<_> = (0..n) .map(|i| { - let mut key = [0u8; 32]; + let mut key = [0u8; KEY_SIZE]; key[0] = i as u8; ( key, - BoundedVec::try_from(vec![i as u8; max_value_len]).unwrap(), + BoundedVec::try_from(vec![i as u8; value_size]).unwrap(), ) }) .collect(); @@ -985,6 +988,8 @@ mod benchmarks { let data = BoundedVec::try_from(data_vec).unwrap(); let origin = T::publish_origin()?; + T::ensure_publisher_registered(&origin)?; + let mut executor = new_executor::(origin); let instruction = Instruction::Publish { data }; diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs index f7984782cfac2..16ecd63c608b5 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs @@ -198,6 +198,11 @@ impl generic::Config for Test { fn publish_origin() -> Result { Ok(Parachain(1000).into()) } + + fn ensure_publisher_registered(_origin: &Location) -> Result<(), BenchmarkError> { + // No registration needed for tests + Ok(()) + } } #[cfg(feature = "runtime-benchmarks")] diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mod.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mod.rs index 5bfab2e7edc14..3b8c953cbbae2 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mod.rs +++ 
b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mod.rs @@ -114,6 +114,15 @@ pub mod pallet { /// Should return a parachain origin that is allowed by the BroadcastHandler filter. /// If set to `Err`, benchmarks which rely on publish will be skipped. fn publish_origin() -> Result { + // Avoid having to set on every runtime that does not want to receive publish. + Err(BenchmarkError::Skip) + } + + /// Ensure the publisher from the given origin is registered. + /// This should register the parachain as a publisher if not already registered. + /// If set to `Err`, benchmarks which rely on publish will be skipped. + fn ensure_publisher_registered(_origin: &Location) -> Result<(), BenchmarkError> { + // Avoid having to set on every runtime that does not want to receive publish. + Err(BenchmarkError::Skip) + } } diff --git a/polkadot/xcm/src/v5/mod.rs b/polkadot/xcm/src/v5/mod.rs index a80935f2581ee..7d77f5a4e487b 100644 --- a/polkadot/xcm/src/v5/mod.rs +++ b/polkadot/xcm/src/v5/mod.rs @@ -212,7 +212,7 @@ parameter_types!
{ pub MaxPalletNameLen: u32 = 48; pub MaxPalletsInfo: u32 = 64; pub MaxAssetTransferFilters: u32 = 6; - pub MaxPublishItems: u32 = 16; + pub MaxPublishItems: u32 = 10; pub MaxPublishValueLength: u32 = 1024; } From a5797743687dccbf813988a00c8fcd050a364ff8 Mon Sep 17 00:00:00 2001 From: metricaez Date: Tue, 16 Dec 2025 11:36:06 -0300 Subject: [PATCH 53/69] chore: duplicated child info derivation --- .../runtime/parachains/src/broadcaster/mod.rs | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/polkadot/runtime/parachains/src/broadcaster/mod.rs b/polkadot/runtime/parachains/src/broadcaster/mod.rs index 74c8601c71552..bd0f35adff19c 100644 --- a/polkadot/runtime/parachains/src/broadcaster/mod.rs +++ b/polkadot/runtime/parachains/src/broadcaster/mod.rs @@ -636,7 +636,9 @@ pub mod pallet { ); // Get or create child trie for this publisher - let child_info = Self::get_or_create_publisher_child_info(origin_para_id); + if !PublisherExists::::contains_key(origin_para_id) { + PublisherExists::::insert(origin_para_id, true); + } // Write to child trie and track keys for enumeration for (key, value) in data { @@ -662,17 +664,6 @@ pub mod pallet { }) } - /// Gets or creates the child trie info for a publisher. - /// - /// Creates the child trie entry on first publish. The MaxPublishers limit is enforced - /// at registration time, so we don't need to check it here. - fn get_or_create_publisher_child_info(para_id: ParaId) -> ChildInfo { - if !PublisherExists::::contains_key(para_id) { - PublisherExists::::insert(para_id, true); - } - Self::derive_child_info(para_id) - } - /// Derives a deterministic child trie identifier from a parachain ID. /// /// The child trie identifier is `(b"pubsub", para_id)` encoded.
From 5be81c8110209a91d97a8bba56f763b6b6fc6352 Mon Sep 17 00:00:00 2001 From: metricaez Date: Tue, 16 Dec 2025 13:34:03 -0300 Subject: [PATCH 54/69] chore: separate weights of sub and misleading param of zombienet --- cumulus/pallets/subscriber/src/lib.rs | 91 +------------- cumulus/pallets/subscriber/src/weights.rs | 137 ++++++++++++++++++++++ pubsub-dev/zombienet.toml | 2 - 3 files changed, 139 insertions(+), 91 deletions(-) create mode 100644 cumulus/pallets/subscriber/src/weights.rs diff --git a/cumulus/pallets/subscriber/src/lib.rs b/cumulus/pallets/subscriber/src/lib.rs index f0bfdd723e817..40130c2135964 100644 --- a/cumulus/pallets/subscriber/src/lib.rs +++ b/cumulus/pallets/subscriber/src/lib.rs @@ -39,6 +39,7 @@ use frame_support::{ use sp_std::vec; pub use pallet::*; +pub use weights::{WeightInfo, SubstrateWeight}; #[cfg(feature = "runtime-benchmarks")] mod benchmarking; @@ -48,6 +49,7 @@ mod test_util; mod mock; #[cfg(test)] mod tests; +pub mod weights; /// Define subscriptions and handle received data. pub trait SubscriptionHandler { @@ -298,92 +300,3 @@ pub mod pallet { } } } - -pub trait WeightInfo { - fn collect_publisher_roots(n: u32) -> Weight; - fn process_published_data(n: u32, k: u32, s: u32) -> Weight; - fn clear_stored_roots() -> Weight; - - /// Weight for processing relay proof excluding handler execution. - /// Benchmarked with no-op handler. Handler weights are added at runtime. - /// - /// Parameters: - /// - `num_publishers`: Number of publishers being processed - /// - `num_keys`: Total number of keys across all publishers - /// - `total_bytes`: Total bytes of data being decoded - fn process_proof_excluding_handler(num_publishers: u32, num_keys: u32, total_bytes: u32) -> Weight { - Self::collect_publisher_roots(num_publishers) - .saturating_add(Self::process_published_data(num_publishers, num_keys, total_bytes)) - } -} - -/// Weights for cumulus_pallet_subscriber using the Substrate node and recommended hardware.
-pub struct SubstrateWeight(core::marker::PhantomData); -impl WeightInfo for SubstrateWeight { - /// The range of component `n` is `[1, 100]`. - fn collect_publisher_roots(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_000_000 picoseconds. - Weight::from_parts(1_000_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 2_289 - .saturating_add(Weight::from_parts(1_853_718, 0).saturating_mul(n.into())) - } - /// Storage: `Subscriber::PreviousPublishedDataRoots` (r:1 w:1) - /// Proof: `Subscriber::PreviousPublishedDataRoots` (`max_values`: Some(1), `max_size`: Some(3702), added: 4197, mode: `MaxEncodedLen`) - /// The range of component `n` is `[1, 100]`. - /// The range of component `k` is `[1, 10]`. - /// The range of component `s` is `[1, 2048]`. - fn process_published_data(n: u32, k: u32, _s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `142` - // Estimated: `5187` - // Minimum execution time: 51_000_000 picoseconds. - Weight::from_parts(51_000_000, 0) - .saturating_add(Weight::from_parts(0, 5187)) - // Standard Error: 448_042 - .saturating_add(Weight::from_parts(33_087_314, 0).saturating_mul(n.into())) - // Standard Error: 4_535_424 - .saturating_add(Weight::from_parts(311_706_924, 0).saturating_mul(k.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Subscriber::PreviousPublishedDataRoots` (r:1 w:1) - /// Proof: `Subscriber::PreviousPublishedDataRoots` (`max_values`: Some(1), `max_size`: Some(3702), added: 4197, mode: `MaxEncodedLen`) - fn clear_stored_roots() -> Weight { - // Proof Size summary in bytes: - // Measured: `206` - // Estimated: `5187` - // Minimum execution time: 8_000_000 picoseconds. 
- Weight::from_parts(9_000_000, 0) - .saturating_add(Weight::from_parts(0, 5187)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } -} - -impl WeightInfo for () { - fn collect_publisher_roots(n: u32) -> Weight { - Weight::from_parts(1_000_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(Weight::from_parts(1_853_718, 0).saturating_mul(n.into())) - } - - fn process_published_data(n: u32, k: u32, _s: u32) -> Weight { - Weight::from_parts(51_000_000, 0) - .saturating_add(Weight::from_parts(0, 5187)) - .saturating_add(Weight::from_parts(33_087_314, 0).saturating_mul(n.into())) - .saturating_add(Weight::from_parts(311_706_924, 0).saturating_mul(k.into())) - .saturating_add(Weight::from_parts(1_000_000, 0)) - .saturating_add(Weight::from_parts(1_000_000, 0)) - } - - fn clear_stored_roots() -> Weight { - Weight::from_parts(9_000_000, 0) - .saturating_add(Weight::from_parts(0, 5187)) - .saturating_add(Weight::from_parts(1_000_000, 0)) - .saturating_add(Weight::from_parts(1_000_000, 0)) - } -} diff --git a/cumulus/pallets/subscriber/src/weights.rs b/cumulus/pallets/subscriber/src/weights.rs new file mode 100644 index 0000000000000..2a8774f12dce5 --- /dev/null +++ b/cumulus/pallets/subscriber/src/weights.rs @@ -0,0 +1,137 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
Generated weights for `cumulus_pallet_subscriber` +//! +//! THESE WEIGHTS WERE GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-12-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `asset-hub-westend`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `asset-hub-westend-dev`, DB CACHE: `1024` + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --pallet +// cumulus-pallet-subscriber +// --chain +// asset-hub-westend-dev +// --output +// cumulus/pallets/subscriber/src/weights.rs +// --template +// substrate/.maintain/frame-weight-template.hbs +// --extrinsic +// + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] +#![allow(dead_code)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `cumulus_pallet_subscriber`. +pub trait WeightInfo { + fn collect_publisher_roots(n: u32) -> Weight; + fn process_published_data(n: u32, k: u32, s: u32) -> Weight; + fn clear_stored_roots() -> Weight; + + /// Weight for processing relay proof excluding handler execution. + /// Benchmarked with no-op handler. Handler weights are added at runtime. + /// + /// Parameters: + /// - `num_publishers`: Number of publishers being processed + /// - `num_keys`: Total number of keys across all publishers + /// - `total_bytes`: Total bytes of data being decoded + fn process_proof_excluding_handler(num_publishers: u32, num_keys: u32, total_bytes: u32) -> Weight { + Self::collect_publisher_roots(num_publishers) + .saturating_add(Self::process_published_data(num_publishers, num_keys, total_bytes)) + } +} + +/// Weights for `cumulus_pallet_subscriber` using the Substrate node and recommended hardware. 
+pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + /// The range of component `n` is `[1, 100]`. + fn collect_publisher_roots(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_000_000 picoseconds. + Weight::from_parts(1_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 2_289 + .saturating_add(Weight::from_parts(1_853_718, 0).saturating_mul(n.into())) + } + /// Storage: `Subscriber::PreviousPublishedDataRoots` (r:1 w:1) + /// Proof: `Subscriber::PreviousPublishedDataRoots` (`max_values`: Some(1), `max_size`: Some(3702), added: 4197, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 100]`. + /// The range of component `k` is `[1, 10]`. + /// The range of component `s` is `[1, 2048]`. + fn process_published_data(n: u32, k: u32, _s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `142` + // Estimated: `5187` + // Minimum execution time: 51_000_000 picoseconds. + Weight::from_parts(51_000_000, 0) + .saturating_add(Weight::from_parts(0, 5187)) + // Standard Error: 448_042 + .saturating_add(Weight::from_parts(33_087_314, 0).saturating_mul(n.into())) + // Standard Error: 4_535_424 + .saturating_add(Weight::from_parts(311_706_924, 0).saturating_mul(k.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Subscriber::PreviousPublishedDataRoots` (r:1 w:1) + /// Proof: `Subscriber::PreviousPublishedDataRoots` (`max_values`: Some(1), `max_size`: Some(3702), added: 4197, mode: `MaxEncodedLen`) + fn clear_stored_roots() -> Weight { + // Proof Size summary in bytes: + // Measured: `206` + // Estimated: `5187` + // Minimum execution time: 8_000_000 picoseconds. 
+ Weight::from_parts(9_000_000, 0) + .saturating_add(Weight::from_parts(0, 5187)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} + +// For backwards compatibility and tests. +impl WeightInfo for () { + fn collect_publisher_roots(n: u32) -> Weight { + Weight::from_parts(1_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(Weight::from_parts(1_853_718, 0).saturating_mul(n.into())) + } + + fn process_published_data(n: u32, k: u32, _s: u32) -> Weight { + Weight::from_parts(51_000_000, 0) + .saturating_add(Weight::from_parts(0, 5187)) + .saturating_add(Weight::from_parts(33_087_314, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(311_706_924, 0).saturating_mul(k.into())) + .saturating_add(RocksDbWeight::get().reads(1)) + .saturating_add(RocksDbWeight::get().writes(1)) + } + + fn clear_stored_roots() -> Weight { + Weight::from_parts(9_000_000, 0) + .saturating_add(Weight::from_parts(0, 5187)) + .saturating_add(RocksDbWeight::get().reads(1)) + .saturating_add(RocksDbWeight::get().writes(1)) + } +} diff --git a/pubsub-dev/zombienet.toml b/pubsub-dev/zombienet.toml index 840e8026c5408..c803a5a7f8d5f 100644 --- a/pubsub-dev/zombienet.toml +++ b/pubsub-dev/zombienet.toml @@ -32,13 +32,11 @@ cumulus_based = true [[parachains.collators]] name = "para-collator01" rpc_port = 9920 - chain = "penpal-local" command = "./target/release/polkadot-parachain" args = ["-lxcm=trace", "--trie-cache-size=0", "--network-backend=libp2p", "--", "--network-backend=libp2p"] [[parachains.collators]] name = "para-collator02" - chain = "penpal-local" command = "./target/release/polkadot-parachain" args = ["-lxcm=trace", "--trie-cache-size=0", "--network-backend=libp2p", "--", "--network-backend=libp2p"] From d700a13f5f8c6d1e96804d3390f049f04a20f120 Mon Sep 17 00:00:00 2001 From: metricaez Date: Tue, 16 Dec 2025 14:26:11 -0300 Subject: [PATCH 55/69] fix: clippy and missing handler --- 
cumulus/pallets/subscriber/src/lib.rs | 2 +- polkadot/xcm/xcm-builder/tests/mock/mod.rs | 1 + polkadot/xcm/xcm-builder/tests/scenarios.rs | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/cumulus/pallets/subscriber/src/lib.rs b/cumulus/pallets/subscriber/src/lib.rs index 40130c2135964..728a72bc6d79c 100644 --- a/cumulus/pallets/subscriber/src/lib.rs +++ b/cumulus/pallets/subscriber/src/lib.rs @@ -185,7 +185,7 @@ pub mod pallet { let prefixed_key = child_info.prefixed_storage_key(); relay_state_proof - .read_optional_entry::<[u8; 32]>(&*prefixed_key) + .read_optional_entry::<[u8; 32]>(&prefixed_key) .ok() .flatten() .map(|root_hash| (*publisher_para_id, root_hash.to_vec())) diff --git a/polkadot/xcm/xcm-builder/tests/mock/mod.rs b/polkadot/xcm/xcm-builder/tests/mock/mod.rs index 7a2eb8cc55adf..f22bce759a70e 100644 --- a/polkadot/xcm/xcm-builder/tests/mock/mod.rs +++ b/polkadot/xcm/xcm-builder/tests/mock/mod.rs @@ -192,6 +192,7 @@ impl xcm_executor::Config for XcmConfig { type SafeCallFilter = Everything; type Aliasers = Nothing; type TransactionalProcessor = (); + type BroadcastHandler = (); type HrmpNewChannelOpenRequestHandler = (); type HrmpChannelAcceptedHandler = (); type HrmpChannelClosingHandler = (); diff --git a/polkadot/xcm/xcm-builder/tests/scenarios.rs b/polkadot/xcm/xcm-builder/tests/scenarios.rs index c772a49fc8226..4246e4ca848da 100644 --- a/polkadot/xcm/xcm-builder/tests/scenarios.rs +++ b/polkadot/xcm/xcm-builder/tests/scenarios.rs @@ -399,6 +399,7 @@ fn recursive_xcm_execution_fail() { type SafeCallFilter = Everything; type Aliasers = Nothing; type TransactionalProcessor = (); + type BroadcastHandler = (); type HrmpNewChannelOpenRequestHandler = (); type HrmpChannelAcceptedHandler = (); type HrmpChannelClosingHandler = (); From 06835127fabb4a702fa22abd1f6720d872bcfaf8 Mon Sep 17 00:00:00 2001 From: metricaez Date: Tue, 16 Dec 2025 16:40:46 -0300 Subject: [PATCH 56/69] feat: integrations cleanup --- Cargo.lock | 16 -- Cargo.toml | 2 
- cumulus/pallets/pubsub-consumer/Cargo.toml | 35 ---- cumulus/pallets/pubsub-consumer/src/lib.rs | 105 ---------- .../assets/asset-hub-westend/Cargo.toml | 4 - .../assets/asset-hub-westend/src/lib.rs | 22 -- .../src/weights/cumulus_pallet_subscriber.rs | 90 -------- .../asset-hub-westend/src/weights/mod.rs | 1 - .../parachains/src/broadcaster/tests.rs | 7 +- polkadot/runtime/rococo/src/lib.rs | 69 +----- polkadot/runtime/rococo/src/weights/mod.rs | 1 - ...polkadot_runtime_parachains_broadcaster.rs | 197 ------------------ polkadot/runtime/rococo/src/xcm_config.rs | 3 +- pubsub-dev/build-benchmarks.sh | 57 ----- pubsub-dev/build.sh | 56 ----- pubsub-dev/zombienet.toml | 42 ---- 16 files changed, 7 insertions(+), 700 deletions(-) delete mode 100644 cumulus/pallets/pubsub-consumer/Cargo.toml delete mode 100644 cumulus/pallets/pubsub-consumer/src/lib.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_subscriber.rs delete mode 100644 polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_broadcaster.rs delete mode 100755 pubsub-dev/build-benchmarks.sh delete mode 100755 pubsub-dev/build.sh delete mode 100644 pubsub-dev/zombienet.toml diff --git a/Cargo.lock b/Cargo.lock index adb3f03b26f25..f17d932389f66 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1371,7 +1371,6 @@ dependencies = [ "cumulus-pallet-aura-ext", "cumulus-pallet-parachain-system", "cumulus-pallet-session-benchmarking", - "cumulus-pallet-subscriber", "cumulus-pallet-weight-reclaim", "cumulus-pallet-xcm", "cumulus-pallet-xcmp-queue", @@ -4743,19 +4742,6 @@ dependencies = [ "syn 2.0.98", ] -[[package]] -name = "cumulus-pallet-pubsub-consumer" -version = "0.1.0" -dependencies = [ - "cumulus-pallet-subscriber", - "cumulus-primitives-core", - "frame-support", - "frame-system", - "parity-scale-codec", - "scale-info", - "sp-std 14.0.0", -] - [[package]] name = "cumulus-pallet-session-benchmarking" version = "9.0.0" @@ -19104,8 +19090,6 @@ version = 
"0.6.0" dependencies = [ "cumulus-pallet-aura-ext", "cumulus-pallet-parachain-system", - "cumulus-pallet-pubsub-consumer", - "cumulus-pallet-subscriber", "cumulus-pallet-weight-reclaim", "cumulus-pallet-xcm", "cumulus-pallet-xcmp-queue", diff --git a/Cargo.toml b/Cargo.toml index b8c3bb04f9bc2..eb8af69d9af4d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -83,7 +83,6 @@ members = [ "cumulus/pallets/dmp-queue", "cumulus/pallets/parachain-system", "cumulus/pallets/parachain-system/proc-macro", - "cumulus/pallets/pubsub-consumer", "cumulus/pallets/session-benchmarking", "cumulus/pallets/solo-to-para", "cumulus/pallets/subscriber", @@ -750,7 +749,6 @@ cumulus-pallet-aura-ext = { path = "cumulus/pallets/aura-ext", default-features cumulus-pallet-dmp-queue = { default-features = false, path = "cumulus/pallets/dmp-queue" } cumulus-pallet-parachain-system = { path = "cumulus/pallets/parachain-system", default-features = false } cumulus-pallet-parachain-system-proc-macro = { path = "cumulus/pallets/parachain-system/proc-macro", default-features = false } -cumulus-pallet-pubsub-consumer = { path = "cumulus/pallets/pubsub-consumer", default-features = false } cumulus-pallet-subscriber = { path = "cumulus/pallets/subscriber", default-features = false } cumulus-pallet-session-benchmarking = { path = "cumulus/pallets/session-benchmarking", default-features = false } cumulus-pallet-solo-to-para = { path = "cumulus/pallets/solo-to-para", default-features = false } diff --git a/cumulus/pallets/pubsub-consumer/Cargo.toml b/cumulus/pallets/pubsub-consumer/Cargo.toml deleted file mode 100644 index 88a01958ef1df..0000000000000 --- a/cumulus/pallets/pubsub-consumer/Cargo.toml +++ /dev/null @@ -1,35 +0,0 @@ -[package] -name = "cumulus-pallet-pubsub-consumer" -version = "0.1.0" -authors.workspace = true -edition.workspace = true -description = "Example consumer pallet for testing pubsub subscriptions" -license = "Apache-2.0" - -[lints] -workspace = true - -[dependencies] -codec = { features = 
["derive"], workspace = true } -scale-info = { features = ["derive"], workspace = true } - -# Substrate -frame-support = { workspace = true } -frame-system = { workspace = true } -sp-std = { workspace = true } - -# Cumulus -cumulus-pallet-subscriber = { workspace = true } -cumulus-primitives-core = { workspace = true } - -[features] -default = ["std"] -std = [ - "codec/std", - "cumulus-pallet-subscriber/std", - "cumulus-primitives-core/std", - "frame-support/std", - "frame-system/std", - "scale-info/std", - "sp-std/std", -] diff --git a/cumulus/pallets/pubsub-consumer/src/lib.rs b/cumulus/pallets/pubsub-consumer/src/lib.rs deleted file mode 100644 index 66669d6e7499d..0000000000000 --- a/cumulus/pallets/pubsub-consumer/src/lib.rs +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#![cfg_attr(not(feature = "std"), no_std)] - -//! Test consumer for pubsub subscriptions. 
- -extern crate alloc; - -use alloc::vec::Vec; -use cumulus_primitives_core::ParaId; -use frame_support::{pallet_prelude::*, BoundedVec}; - -pub use pallet::*; - -pub struct TestSubscriptionHandler(core::marker::PhantomData); - -impl cumulus_pallet_subscriber::SubscriptionHandler for TestSubscriptionHandler { - fn subscriptions() -> (Vec<(ParaId, Vec>)>, Weight) { - // Subscribe to keys from publisher ParaId 1000 - let key1 = alloc::vec![ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - ]; - let key2 = alloc::vec![ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - ]; - - (alloc::vec![(ParaId::from(1000), alloc::vec![key1, key2])], Weight::zero()) - } - - fn on_data_updated(publisher: ParaId, key: Vec, value: Vec) -> Weight { - let bounded_key: BoundedVec> = - key.clone().try_into().unwrap_or_default(); - let bounded_value: BoundedVec> = - value.clone().try_into().unwrap_or_default(); - - >::insert(&publisher, &bounded_key, &bounded_value); - - Pallet::::deposit_event(Event::DataReceived { - publisher, - key: bounded_key, - value: bounded_value, - }); - - Weight::zero() - } -} - -#[frame_support::pallet] -pub mod pallet { - use super::*; - - #[pallet::pallet] - pub struct Pallet(_); - - #[pallet::config] - pub trait Config: frame_system::Config {} - - #[pallet::storage] - pub type ReceivedData = StorageDoubleMap< - _, - Blake2_128Concat, - ParaId, - Blake2_128Concat, - BoundedVec>, - BoundedVec>, - OptionQuery, - >; - - #[pallet::event] - #[pallet::generate_deposit(pub(super) fn deposit_event)] - pub enum Event { - DataReceived { - publisher: ParaId, - key: BoundedVec>, - value: BoundedVec>, - }, - } - - impl Pallet { - pub fn get_data(publisher: ParaId, key: &[u8]) 
-> Option> { - let bounded_key: BoundedVec> = key.to_vec().try_into().ok()?; - ReceivedData::::get(publisher, bounded_key).map(|v| v.into_inner()) - } - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml index e1dac1c813df6..23bc88a16ccf0 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml @@ -114,7 +114,6 @@ assets-common = { workspace = true } cumulus-pallet-aura-ext = { workspace = true } cumulus-pallet-parachain-system = { workspace = true } cumulus-pallet-session-benchmarking = { workspace = true } -cumulus-pallet-subscriber = { workspace = true } cumulus-pallet-weight-reclaim = { workspace = true } cumulus-pallet-xcm = { workspace = true } cumulus-pallet-xcmp-queue = { features = ["bridging"], workspace = true } @@ -153,7 +152,6 @@ runtime-benchmarks = [ "assets-common/runtime-benchmarks", "cumulus-pallet-parachain-system/runtime-benchmarks", "cumulus-pallet-session-benchmarking/runtime-benchmarks", - "cumulus-pallet-subscriber/runtime-benchmarks", "cumulus-pallet-weight-reclaim/runtime-benchmarks", "cumulus-pallet-xcmp-queue/runtime-benchmarks", "cumulus-primitives-core/runtime-benchmarks", @@ -227,7 +225,6 @@ try-runtime = [ "assets-common/try-runtime", "cumulus-pallet-aura-ext/try-runtime", "cumulus-pallet-parachain-system/try-runtime", - "cumulus-pallet-subscriber/try-runtime", "cumulus-pallet-weight-reclaim/try-runtime", "cumulus-pallet-xcm/try-runtime", "cumulus-pallet-xcmp-queue/try-runtime", @@ -301,7 +298,6 @@ std = [ "cumulus-pallet-aura-ext/std", "cumulus-pallet-parachain-system/std", "cumulus-pallet-session-benchmarking/std", - "cumulus-pallet-subscriber/std", "cumulus-pallet-weight-reclaim/std", "cumulus-pallet-xcm/std", "cumulus-pallet-xcmp-queue/std", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs 
b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index 38cbcebfc42a3..f0cb527c42f09 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -915,26 +915,6 @@ type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< impl parachain_info::Config for Runtime {} -parameter_types! { - pub const MaxPublishers: u32 = 100; -} - -pub struct NoOpSubscriptionHandler; -impl cumulus_pallet_subscriber::SubscriptionHandler for NoOpSubscriptionHandler { - fn subscriptions() -> (Vec<(ParaId, Vec>)>, Weight) { - (vec![], Weight::zero()) - } - fn on_data_updated(_publisher: ParaId, _key: Vec, _value: Vec) -> Weight { - Weight::zero() - } -} - -impl cumulus_pallet_subscriber::Config for Runtime { - type SubscriptionHandler = NoOpSubscriptionHandler; - type WeightInfo = weights::cumulus_pallet_subscriber::WeightInfo; - type MaxPublishers = MaxPublishers; -} - parameter_types! { pub MessageQueueServiceWeight: Weight = Perbill::from_percent(35) * RuntimeBlockWeights::get().max_block; } @@ -1344,7 +1324,6 @@ construct_runtime!( ParachainInfo: parachain_info = 4, WeightReclaim: cumulus_pallet_weight_reclaim = 5, MultiBlockMigrations: pallet_migrations = 6, - Subscriber: cumulus_pallet_subscriber = 255, Preimage: pallet_preimage = 7, Scheduler: pallet_scheduler = 8, Sudo: pallet_sudo = 9, @@ -1735,7 +1714,6 @@ mod benches { [pallet_transaction_payment, TransactionPayment] [pallet_collator_selection, CollatorSelection] [cumulus_pallet_parachain_system, ParachainSystem] - [cumulus_pallet_subscriber, Subscriber] [cumulus_pallet_xcmp_queue, XcmpQueue] [pallet_treasury, Treasury] [pallet_vesting, Vesting] diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_subscriber.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_subscriber.rs deleted file mode 100644 index 42b566b219251..0000000000000 --- 
a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_subscriber.rs +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `cumulus_pallet_subscriber` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2025-12-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `192.168.1.2`, CPU: `` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/release/polkadot-parachain -// benchmark -// pallet -// --chain=asset-hub-westend-dev -// --wasm-execution=compiled -// --pallet=cumulus_pallet_subscriber -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --output=./cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_subscriber.rs -// --header=./cumulus/file_header.txt - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `cumulus_pallet_subscriber`. -pub struct WeightInfo(PhantomData); -impl cumulus_pallet_subscriber::WeightInfo for WeightInfo { - /// The range of component `n` is `[1, 100]`. 
- fn collect_publisher_roots(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_000_000 picoseconds. - Weight::from_parts(1_000_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 2_289 - .saturating_add(Weight::from_parts(1_853_718, 0).saturating_mul(n.into())) - } - /// Storage: `Subscriber::PreviousPublishedDataRoots` (r:1 w:1) - /// Proof: `Subscriber::PreviousPublishedDataRoots` (`max_values`: Some(1), `max_size`: Some(3702), added: 4197, mode: `MaxEncodedLen`) - /// The range of component `n` is `[1, 100]`. - /// The range of component `k` is `[1, 10]`. - /// The range of component `s` is `[1, 2048]`. - fn process_published_data(n: u32, k: u32, _s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `142` - // Estimated: `5187` - // Minimum execution time: 51_000_000 picoseconds. - Weight::from_parts(51_000_000, 0) - .saturating_add(Weight::from_parts(0, 5187)) - // Standard Error: 448_042 - .saturating_add(Weight::from_parts(33_087_314, 0).saturating_mul(n.into())) - // Standard Error: 4_535_424 - .saturating_add(Weight::from_parts(311_706_924, 0).saturating_mul(k.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Subscriber::PreviousPublishedDataRoots` (r:1 w:1) - /// Proof: `Subscriber::PreviousPublishedDataRoots` (`max_values`: Some(1), `max_size`: Some(3702), added: 4197, mode: `MaxEncodedLen`) - fn clear_stored_roots() -> Weight { - // Proof Size summary in bytes: - // Measured: `206` - // Estimated: `5187` - // Minimum execution time: 8_000_000 picoseconds. 
- Weight::from_parts(9_000_000, 0) - .saturating_add(Weight::from_parts(0, 5187)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs index 2df84997cfbd0..c8e94609da202 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs @@ -15,7 +15,6 @@ pub mod block_weights; pub mod cumulus_pallet_parachain_system; -pub mod cumulus_pallet_subscriber; pub mod cumulus_pallet_weight_reclaim; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; diff --git a/polkadot/runtime/parachains/src/broadcaster/tests.rs b/polkadot/runtime/parachains/src/broadcaster/tests.rs index 980ca12bee234..8396a9d3e98d9 100644 --- a/polkadot/runtime/parachains/src/broadcaster/tests.rs +++ b/polkadot/runtime/parachains/src/broadcaster/tests.rs @@ -294,10 +294,11 @@ fn max_stored_keys_limit_enforced() { // Publish 50 small items to test MaxStoredKeys without hitting TotalStorageSize limit // Each item is 32 (key) + 1 (value) = 33 bytes, total ~1650 bytes - for batch in 0..4 { + // Publish in batches of 10 items to respect MaxPublishItems = 10 + for batch in 0..5 { let mut data = Vec::new(); - for i in 0..16 { - let key_num = batch * 16 + i; + for i in 0..10 { + let key_num = batch * 10 + i; if key_num < 50 { data.push((hash_key(&format!("key{}", key_num).into_bytes()), b"v".to_vec())); } diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 25579a5a903ce..48e30162fe1b0 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -66,8 +66,7 @@ use polkadot_runtime_common::{ BlockHashCount, BlockLength, SlowAdjustingFeeUpdate, }; use polkadot_runtime_parachains::{ - assigner_coretime as parachains_assigner_coretime, 
broadcaster as parachains_broadcaster, - configuration as parachains_configuration, + assigner_coretime as parachains_assigner_coretime, configuration as parachains_configuration, configuration::ActiveConfigHrmpChannelSizeAndCapacityRatio, coretime, disputes as parachains_disputes, disputes::slashing as parachains_slashing, @@ -1199,7 +1198,7 @@ impl parachains_initializer::Config for Runtime { type ForceOrigin = EnsureRoot; type WeightInfo = weights::polkadot_runtime_parachains_initializer::WeightInfo; type CoretimeOnNewSession = Coretime; - type OnNewSessionOutgoing = Broadcaster; + type OnNewSessionOutgoing = (); } impl parachains_disputes::Config for Runtime { @@ -1226,27 +1225,6 @@ impl parachains_slashing::Config for Runtime { type BenchmarkingConfig = parachains_slashing::BenchConfig<200>; } -parameter_types! { - pub const MaxPublishItems: u32 = 10; - pub const MaxValueLength: u32 = 1024; - pub const MaxStoredKeys: u32 = 50; - pub const MaxTotalStorageSize: u32 = 2048; // 2 KiB - pub const MaxPublishers: u32 = 1000; - pub const PublisherDeposit: Balance = 100 * UNITS; -} - -impl parachains_broadcaster::Config for Runtime { - type Currency = Balances; - type RuntimeHoldReason = RuntimeHoldReason; - type WeightInfo = weights::polkadot_runtime_parachains_broadcaster::WeightInfo; - type MaxPublishItems = MaxPublishItems; - type MaxValueLength = MaxValueLength; - type MaxStoredKeys = MaxStoredKeys; - type MaxTotalStorageSize = MaxTotalStorageSize; - type MaxPublishers = MaxPublishers; - type PublisherDeposit = PublisherDeposit; -} - parameter_types! { pub const ParaDeposit: Balance = 40 * UNITS; } @@ -1611,7 +1589,6 @@ construct_runtime! 
{ ParaSessionInfo: parachains_session_info = 61, ParasDisputes: parachains_disputes = 62, ParasSlashing: parachains_slashing = 63, - Broadcaster: parachains_broadcaster = 65, MessageQueue: pallet_message_queue = 64, OnDemandAssignmentProvider: parachains_on_demand = 66, CoretimeAssignmentProvider: parachains_assigner_coretime = 68, @@ -1876,7 +1853,6 @@ mod benches { [polkadot_runtime_common::identity_migrator, IdentityMigrator] [polkadot_runtime_common::slots, Slots] [polkadot_runtime_common::paras_registrar, Registrar] - [polkadot_runtime_parachains::broadcaster, Broadcaster] [polkadot_runtime_parachains::configuration, Configuration] [polkadot_runtime_parachains::coretime, Coretime] [polkadot_runtime_parachains::hrmp, Hrmp] @@ -2682,47 +2658,6 @@ sp_api::impl_runtime_apis! { Ok(AssetHub::get()) } - fn publish_origin() -> Result { - Ok(AssetHub::get()) - } - - fn ensure_publisher_registered(origin: &Location) -> Result<(), BenchmarkError> { - use frame_benchmarking::whitelisted_caller; - use frame_support::ensure; - - // Extract parachain ID from origin - let para_id = match origin.unpack() { - (0, [Junction::Parachain(id)]) => { - polkadot_primitives::Id::from(*id) - }, - (1, [Junction::Parachain(id), ..]) => { - polkadot_primitives::Id::from(*id) - }, - _ => { - return Err(BenchmarkError::Stop("Invalid origin for publisher registration")) - }, - }; - - // Force register the publisher with zero deposit for benchmarking - let manager: AccountId = whitelisted_caller(); - Broadcaster::force_register_publisher( - RuntimeOrigin::root(), - manager, - 0u128, - para_id, - ) - .map_err(|_| BenchmarkError::Stop("Failed to register publisher"))?; - - // Verify registration succeeded - let is_registered = parachains_broadcaster::RegisteredPublishers::::contains_key(para_id); - ensure!( - is_registered, - BenchmarkError::Stop("Publisher not registered after force_register") - ); - - Ok(()) - } - fn claimable_asset() -> Result<(Location, Location, Assets), BenchmarkError> { 
let origin = AssetHub::get(); let assets: Assets = (AssetId(TokenLocation::get()), 1_000 * UNITS).into(); diff --git a/polkadot/runtime/rococo/src/weights/mod.rs b/polkadot/runtime/rococo/src/weights/mod.rs index 95afa287f2c5b..1c030c444ac59 100644 --- a/polkadot/runtime/rococo/src/weights/mod.rs +++ b/polkadot/runtime/rococo/src/weights/mod.rs @@ -54,7 +54,6 @@ pub mod polkadot_runtime_common_crowdloan; pub mod polkadot_runtime_common_identity_migrator; pub mod polkadot_runtime_common_paras_registrar; pub mod polkadot_runtime_common_slots; -pub mod polkadot_runtime_parachains_broadcaster; pub mod polkadot_runtime_parachains_configuration; pub mod polkadot_runtime_parachains_coretime; pub mod polkadot_runtime_parachains_disputes; diff --git a/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_broadcaster.rs b/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_broadcaster.rs deleted file mode 100644 index 7c35d80517854..0000000000000 --- a/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_broadcaster.rs +++ /dev/null @@ -1,197 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Autogenerated weights for `polkadot_runtime_parachains::broadcaster` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! 
DATE: 2025-12-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `192.168.1.2`, CPU: `` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/release/polkadot -// benchmark -// pallet -// --chain=rococo-dev -// --steps=50 -// --repeat=20 -// --pallet=polkadot_runtime_parachains::broadcaster -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --output=./polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_broadcaster.rs -// --header=./polkadot/file_header.txt - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `polkadot_runtime_parachains::broadcaster`. -pub struct WeightInfo(PhantomData); -impl polkadot_runtime_parachains::broadcaster::WeightInfo for WeightInfo { - /// Storage: `Broadcaster::RegisteredPublishers` (r:2 w:1) - /// Proof: `Broadcaster::RegisteredPublishers` (`max_values`: None, `max_size`: Some(60), added: 2535, mode: `MaxEncodedLen`) - /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `MaxEncodedLen`) - fn register_publisher() -> Weight { - // Proof Size summary in bytes: - // Measured: `272` - // Estimated: `6060` - // Minimum execution time: 49_000_000 picoseconds. 
- Weight::from_parts(50_000_000, 0) - .saturating_add(Weight::from_parts(0, 6060)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Broadcaster::RegisteredPublishers` (r:2 w:1) - /// Proof: `Broadcaster::RegisteredPublishers` (`max_values`: None, `max_size`: Some(60), added: 2535, mode: `MaxEncodedLen`) - fn force_register_publisher() -> Weight { - // Proof Size summary in bytes: - // Measured: `272` - // Estimated: `6060` - // Minimum execution time: 12_000_000 picoseconds. - Weight::from_parts(14_000_000, 0) - .saturating_add(Weight::from_parts(0, 6060)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Broadcaster::PublishedKeys` (r:1 w:1) - /// Proof: `Broadcaster::PublishedKeys` (`max_values`: None, `max_size`: Some(1613), added: 4088, mode: `MaxEncodedLen`) - /// Storage: `Broadcaster::PublisherExists` (r:0 w:1) - /// Proof: `Broadcaster::PublisherExists` (`max_values`: None, `max_size`: Some(13), added: 2488, mode: `MaxEncodedLen`) - /// Storage: `Broadcaster::TotalStorageSize` (r:0 w:1) - /// Proof: `Broadcaster::TotalStorageSize` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x15ac8beec98bc93b831fe0c1c3a4973a81a08a41496dab0f714e0b1a36f6907c` (r:0 w:1) - /// Proof: UNKNOWN KEY `0x15ac8beec98bc93b831fe0c1c3a4973a81a08a41496dab0f714e0b1a36f6907c` (r:0 w:1) - /// Storage: UNKNOWN KEY `0x16af40e99eac4ec67683e7084135c6d39fb8c295b6588ed52dcfedd44a5d4edf` (r:0 w:1) - /// Proof: UNKNOWN KEY `0x16af40e99eac4ec67683e7084135c6d39fb8c295b6588ed52dcfedd44a5d4edf` (r:0 w:1) - /// Storage: UNKNOWN KEY `0x16be8f420223354f5ff3201e57c3b8628b3e5b3e209dfcaa89ece4a8e5c68b1d` (r:0 w:1) - /// Proof: UNKNOWN KEY `0x16be8f420223354f5ff3201e57c3b8628b3e5b3e209dfcaa89ece4a8e5c68b1d` (r:0 w:1) - /// Storage: UNKNOWN KEY `0x28b4fdcd9d46acf522102ea5adf42c1a626443b1afef620ef524c61784fc2383` (r:0 
w:1) - /// Proof: UNKNOWN KEY `0x28b4fdcd9d46acf522102ea5adf42c1a626443b1afef620ef524c61784fc2383` (r:0 w:1) - /// Storage: UNKNOWN KEY `0x2f1ce9367a8a7f59d87e877861c262dd7b1ca1e976282b28a3d6d7f63282304e` (r:0 w:1) - /// Proof: UNKNOWN KEY `0x2f1ce9367a8a7f59d87e877861c262dd7b1ca1e976282b28a3d6d7f63282304e` (r:0 w:1) - /// Storage: UNKNOWN KEY `0x2fc3acc91091d2a0f88f79a00144305f85993b39b3ce676da78b6ec1e99cee9d` (r:0 w:1) - /// Proof: UNKNOWN KEY `0x2fc3acc91091d2a0f88f79a00144305f85993b39b3ce676da78b6ec1e99cee9d` (r:0 w:1) - /// Storage: UNKNOWN KEY `0x4253a98a38b2917c26e0dcd9f1f3591563c586db07c30c4288a1329f03d5f345` (r:0 w:1) - /// Proof: UNKNOWN KEY `0x4253a98a38b2917c26e0dcd9f1f3591563c586db07c30c4288a1329f03d5f345` (r:0 w:1) - /// Storage: UNKNOWN KEY `0x4a2e603f4ccddc5063e87b25384bc25b8e45c6f0d536292a928ff838e3dfe777` (r:0 w:1) - /// Proof: UNKNOWN KEY `0x4a2e603f4ccddc5063e87b25384bc25b8e45c6f0d536292a928ff838e3dfe777` (r:0 w:1) - /// Storage: UNKNOWN KEY `0x52f26ad87bbab43295b479cda10e4e7e5856670d8110827b328ddffe176d50f9` (r:0 w:1) - /// Proof: UNKNOWN KEY `0x52f26ad87bbab43295b479cda10e4e7e5856670d8110827b328ddffe176d50f9` (r:0 w:1) - /// Storage: UNKNOWN KEY `0x542b58fa02b321fe24d779d0c9487211e8703bb5137aafcfd90293bfd9f62686` (r:0 w:1) - /// Proof: UNKNOWN KEY `0x542b58fa02b321fe24d779d0c9487211e8703bb5137aafcfd90293bfd9f62686` (r:0 w:1) - /// Storage: UNKNOWN KEY `0x56b3cd6cecdf8d5c1bcc82536c0f2a84960aa569dfe456df8b7c6a84a649f5f2` (r:0 w:1) - /// Proof: UNKNOWN KEY `0x56b3cd6cecdf8d5c1bcc82536c0f2a84960aa569dfe456df8b7c6a84a649f5f2` (r:0 w:1) - /// Storage: UNKNOWN KEY `0x5d6c339abbb79e03a57a8ed9dc802d9c6e6bf5a7aca1b846090e13282d8d7ef9` (r:0 w:1) - /// Proof: UNKNOWN KEY `0x5d6c339abbb79e03a57a8ed9dc802d9c6e6bf5a7aca1b846090e13282d8d7ef9` (r:0 w:1) - /// Storage: UNKNOWN KEY `0x5e30b2f53e107c8c9a7b794d5b2ec87170b9ce607738911d525166c5c6ce9d3f` (r:0 w:1) - /// Proof: UNKNOWN KEY `0x5e30b2f53e107c8c9a7b794d5b2ec87170b9ce607738911d525166c5c6ce9d3f` (r:0 w:1) 
- /// Storage: UNKNOWN KEY `0x5e494c16c55fc5d253faf1f5c631f7629df68cbb892e2d2275d2ba0973251b33` (r:0 w:1) - /// Proof: UNKNOWN KEY `0x5e494c16c55fc5d253faf1f5c631f7629df68cbb892e2d2275d2ba0973251b33` (r:0 w:1) - /// Storage: UNKNOWN KEY `0x65706813667bdea8995a6b3eee946258218c0dee5b61a109f67e6d0bdaeeb0c1` (r:0 w:1) - /// Proof: UNKNOWN KEY `0x65706813667bdea8995a6b3eee946258218c0dee5b61a109f67e6d0bdaeeb0c1` (r:0 w:1) - /// Storage: UNKNOWN KEY `0x65a4b2252574fa1faf8c982779faf7c433d71f8d0724aac8a9791ed68757508b` (r:0 w:1) - /// Proof: UNKNOWN KEY `0x65a4b2252574fa1faf8c982779faf7c433d71f8d0724aac8a9791ed68757508b` (r:0 w:1) - /// Storage: UNKNOWN KEY `0x68d11757b0b819e8392e9cf5a0dcc019dab990c92b5884070bbbccd46ac35366` (r:0 w:1) - /// Proof: UNKNOWN KEY `0x68d11757b0b819e8392e9cf5a0dcc019dab990c92b5884070bbbccd46ac35366` (r:0 w:1) - /// Storage: UNKNOWN KEY `0x6ac06fa426d3ee7e1e1492d1e8bac20730a25585662c28ee57327a6823c99461` (r:0 w:1) - /// Proof: UNKNOWN KEY `0x6ac06fa426d3ee7e1e1492d1e8bac20730a25585662c28ee57327a6823c99461` (r:0 w:1) - /// Storage: UNKNOWN KEY `0x788de17a74d77a4e414bcdcec6475be3a623c24d4bf4016c92005ffe5cbdbd06` (r:0 w:1) - /// Proof: UNKNOWN KEY `0x788de17a74d77a4e414bcdcec6475be3a623c24d4bf4016c92005ffe5cbdbd06` (r:0 w:1) - /// Storage: UNKNOWN KEY `0x7b6359e6345f9579d0d7bfb87cf815eb748b10dfec77f04d830240a3c377f7aa` (r:0 w:1) - /// Proof: UNKNOWN KEY `0x7b6359e6345f9579d0d7bfb87cf815eb748b10dfec77f04d830240a3c377f7aa` (r:0 w:1) - /// Storage: UNKNOWN KEY `0x7f8988f65ef4e9f910fad3021858e5f4db43fbb72c883fbafe0dc8a7072e7524` (r:0 w:1) - /// Proof: UNKNOWN KEY `0x7f8988f65ef4e9f910fad3021858e5f4db43fbb72c883fbafe0dc8a7072e7524` (r:0 w:1) - /// Storage: UNKNOWN KEY `0x8238f47d03bc01514154ca3909abc8b9633cdef12cc9951abaebffe843c14041` (r:0 w:1) - /// Proof: UNKNOWN KEY `0x8238f47d03bc01514154ca3909abc8b9633cdef12cc9951abaebffe843c14041` (r:0 w:1) - /// Storage: UNKNOWN KEY `0x86287325a910c1c9fa03e562b552b24254a8db6330b9d66e878999dd50b577d6` (r:0 w:1) - 
/// Proof: UNKNOWN KEY `0x86287325a910c1c9fa03e562b552b24254a8db6330b9d66e878999dd50b577d6` (r:0 w:1) - /// Storage: UNKNOWN KEY `0x886a678a6e522f119673a545e9077525fba6e40fb298a78973e2fe5b2d60bb24` (r:0 w:1) - /// Proof: UNKNOWN KEY `0x886a678a6e522f119673a545e9077525fba6e40fb298a78973e2fe5b2d60bb24` (r:0 w:1) - /// Storage: UNKNOWN KEY `0x896289bf1050ec1f4586e230f9aa9501b477b22cc34e67a1374e04751a641b64` (r:0 w:1) - /// Proof: UNKNOWN KEY `0x896289bf1050ec1f4586e230f9aa9501b477b22cc34e67a1374e04751a641b64` (r:0 w:1) - /// Storage: UNKNOWN KEY `0x8bcadc2a27b91cb041e0ae13445664ec836a5c5b2e8e6d59ef44e1a4d70ef858` (r:0 w:1) - /// Proof: UNKNOWN KEY `0x8bcadc2a27b91cb041e0ae13445664ec836a5c5b2e8e6d59ef44e1a4d70ef858` (r:0 w:1) - /// Storage: UNKNOWN KEY `0x8c9237de5d36d7d3c5e5e4d3b5f4d090017b2dadc24e16ea29f89e1c3697e6cf` (r:0 w:1) - /// Proof: UNKNOWN KEY `0x8c9237de5d36d7d3c5e5e4d3b5f4d090017b2dadc24e16ea29f89e1c3697e6cf` (r:0 w:1) - /// Storage: UNKNOWN KEY `0x8e30e4682feee1bfae686b3a4036cc27f800f452988af57a5d3e59fc1beb78b8` (r:0 w:1) - /// Proof: UNKNOWN KEY `0x8e30e4682feee1bfae686b3a4036cc27f800f452988af57a5d3e59fc1beb78b8` (r:0 w:1) - /// Storage: UNKNOWN KEY `0x8e6c825b6355a8ae7527e0eb42fd256008357f2c185156b417505364fde91f4c` (r:0 w:1) - /// Proof: UNKNOWN KEY `0x8e6c825b6355a8ae7527e0eb42fd256008357f2c185156b417505364fde91f4c` (r:0 w:1) - /// Storage: UNKNOWN KEY `0x94e4f31083caa66ea61002606c7ced3db33ab20d2b4a17663eb336fb18322fb8` (r:0 w:1) - /// Proof: UNKNOWN KEY `0x94e4f31083caa66ea61002606c7ced3db33ab20d2b4a17663eb336fb18322fb8` (r:0 w:1) - /// Storage: UNKNOWN KEY `0xab1f0c77c2ab2d59818acfffa4b9bd82978c00f921f6df4cdf9e95817b3cb82a` (r:0 w:1) - /// Proof: UNKNOWN KEY `0xab1f0c77c2ab2d59818acfffa4b9bd82978c00f921f6df4cdf9e95817b3cb82a` (r:0 w:1) - /// Storage: UNKNOWN KEY `0xac8e27d5534680471ad48b9dbefc509f6dca2f65b29b45279cc651e7d29d93af` (r:0 w:1) - /// Proof: UNKNOWN KEY `0xac8e27d5534680471ad48b9dbefc509f6dca2f65b29b45279cc651e7d29d93af` (r:0 w:1) - /// 
Storage: UNKNOWN KEY `0xae02f663b6f751aff9ab13560e3b32bde2bf2d04cdd7a252cd29603b34cf67f3` (r:0 w:1) - /// Proof: UNKNOWN KEY `0xae02f663b6f751aff9ab13560e3b32bde2bf2d04cdd7a252cd29603b34cf67f3` (r:0 w:1) - /// Storage: UNKNOWN KEY `0xaeb83a412cf2a74cef832fdec6d973f4fe6eed123bd47f49a3d602262865059b` (r:0 w:1) - /// Proof: UNKNOWN KEY `0xaeb83a412cf2a74cef832fdec6d973f4fe6eed123bd47f49a3d602262865059b` (r:0 w:1) - /// Storage: UNKNOWN KEY `0xb599461299f78aa5ea11de079cceaa320fa794f8388c5a4ffb169edbb3b3836b` (r:0 w:1) - /// Proof: UNKNOWN KEY `0xb599461299f78aa5ea11de079cceaa320fa794f8388c5a4ffb169edbb3b3836b` (r:0 w:1) - /// Storage: UNKNOWN KEY `0xb8e5f31d2d2eab325f2dbee34a6749e013c1d2f1a0020b4c68e7eb3e1c720abc` (r:0 w:1) - /// Proof: UNKNOWN KEY `0xb8e5f31d2d2eab325f2dbee34a6749e013c1d2f1a0020b4c68e7eb3e1c720abc` (r:0 w:1) - /// Storage: UNKNOWN KEY `0xc05903007f6c34f907793c2370e5193d863c7c21c477a9a87e991d7501be6517` (r:0 w:1) - /// Proof: UNKNOWN KEY `0xc05903007f6c34f907793c2370e5193d863c7c21c477a9a87e991d7501be6517` (r:0 w:1) - /// Storage: UNKNOWN KEY `0xca7eae6d26208ecc01bdc6394f588da72afdb8ce73c251fd24f5b19f9aa83cfc` (r:0 w:1) - /// Proof: UNKNOWN KEY `0xca7eae6d26208ecc01bdc6394f588da72afdb8ce73c251fd24f5b19f9aa83cfc` (r:0 w:1) - /// Storage: UNKNOWN KEY `0xcd5796c119221de2ec09358971ccc8d73f9b92f69af509813187c6b2cdc5c6ca` (r:0 w:1) - /// Proof: UNKNOWN KEY `0xcd5796c119221de2ec09358971ccc8d73f9b92f69af509813187c6b2cdc5c6ca` (r:0 w:1) - /// Storage: UNKNOWN KEY `0xd8f34156e481702964e8ade1063453c2c0920afff07e90264e06ed89f9f9631f` (r:0 w:1) - /// Proof: UNKNOWN KEY `0xd8f34156e481702964e8ade1063453c2c0920afff07e90264e06ed89f9f9631f` (r:0 w:1) - /// Storage: UNKNOWN KEY `0xdaf2aba47fc5c8f30858d1bda295f3513b184c285e00f542adbe181ceb2e088f` (r:0 w:1) - /// Proof: UNKNOWN KEY `0xdaf2aba47fc5c8f30858d1bda295f3513b184c285e00f542adbe181ceb2e088f` (r:0 w:1) - /// Storage: UNKNOWN KEY `0xdfeac712ddc4d22892e9753643ff8c8fb01efb37d6a8f4a2a2aa7e3143f80817` (r:0 w:1) - /// 
Proof: UNKNOWN KEY `0xdfeac712ddc4d22892e9753643ff8c8fb01efb37d6a8f4a2a2aa7e3143f80817` (r:0 w:1) - /// Storage: UNKNOWN KEY `0xe086e1cb55ef70ce910b1c7a6c9e9642ee1b2f3027ae8e4e3774dc8db6a3d2a4` (r:0 w:1) - /// Proof: UNKNOWN KEY `0xe086e1cb55ef70ce910b1c7a6c9e9642ee1b2f3027ae8e4e3774dc8db6a3d2a4` (r:0 w:1) - /// Storage: UNKNOWN KEY `0xe618b9e70ac91465a05b26bb320da486a297887d987053f704fb17293e7de326` (r:0 w:1) - /// Proof: UNKNOWN KEY `0xe618b9e70ac91465a05b26bb320da486a297887d987053f704fb17293e7de326` (r:0 w:1) - /// Storage: UNKNOWN KEY `0xe6f5ca06fed122aab7133197633b4e11b8c7a106e6160dfbc95981f8dc26cb57` (r:0 w:1) - /// Proof: UNKNOWN KEY `0xe6f5ca06fed122aab7133197633b4e11b8c7a106e6160dfbc95981f8dc26cb57` (r:0 w:1) - /// Storage: UNKNOWN KEY `0xf45418b273b9746266e047754cb524e91a24612f0ba86804f8271dd1b5f4ea0c` (r:0 w:1) - /// Proof: UNKNOWN KEY `0xf45418b273b9746266e047754cb524e91a24612f0ba86804f8271dd1b5f4ea0c` (r:0 w:1) - /// Storage: UNKNOWN KEY `0xf68346726d2bb257005ebe3126da510c53943008902a3b809ae75fdce5b9f5c1` (r:0 w:1) - /// Proof: UNKNOWN KEY `0xf68346726d2bb257005ebe3126da510c53943008902a3b809ae75fdce5b9f5c1` (r:0 w:1) - /// Storage: UNKNOWN KEY `0xf70e2a259d913084a49224aee6ba9c098c637944c566723f79379a9dc0498ff1` (r:0 w:1) - /// Proof: UNKNOWN KEY `0xf70e2a259d913084a49224aee6ba9c098c637944c566723f79379a9dc0498ff1` (r:0 w:1) - /// Storage: UNKNOWN KEY `0xfb1bc128f8eb8aece1e7f52f2b22b15b90a40f53c500f534aec49d19fff0b8e7` (r:0 w:1) - /// Proof: UNKNOWN KEY `0xfb1bc128f8eb8aece1e7f52f2b22b15b90a40f53c500f534aec49d19fff0b8e7` (r:0 w:1) - /// Storage: UNKNOWN KEY `0xfdb5e3874e72d9ca11b3b0875c92f5b48c71c916c0fe09387cae87e1450a8628` (r:0 w:1) - /// Proof: UNKNOWN KEY `0xfdb5e3874e72d9ca11b3b0875c92f5b48c71c916c0fe09387cae87e1450a8628` (r:0 w:1) - /// The range of component `k` is `[1, 50]`. 
- fn do_cleanup_publisher(k: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `356 + k * (32 ±0)` - // Estimated: `5078 + k * (39 ±0)` - // Minimum execution time: 10_000_000 picoseconds. - Weight::from_parts(12_327_673, 0) - .saturating_add(Weight::from_parts(0, 5078)) - // Standard Error: 18_369 - .saturating_add(Weight::from_parts(360_091, 0).saturating_mul(k.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(3)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) - .saturating_add(Weight::from_parts(0, 39).saturating_mul(k.into())) - } -} diff --git a/polkadot/runtime/rococo/src/xcm_config.rs b/polkadot/runtime/rococo/src/xcm_config.rs index 65e3931831ead..5b6654438fa62 100644 --- a/polkadot/runtime/rococo/src/xcm_config.rs +++ b/polkadot/runtime/rococo/src/xcm_config.rs @@ -227,8 +227,7 @@ impl xcm_executor::Config for XcmConfig { type HrmpChannelAcceptedHandler = (); type HrmpChannelClosingHandler = (); type XcmRecorder = XcmPallet; - type BroadcastHandler = - xcm_builder::ParachainBroadcastAdapter; + type BroadcastHandler = (); } parameter_types! { diff --git a/pubsub-dev/build-benchmarks.sh b/pubsub-dev/build-benchmarks.sh deleted file mode 100755 index f4883f3dd8db5..0000000000000 --- a/pubsub-dev/build-benchmarks.sh +++ /dev/null @@ -1,57 +0,0 @@ -#!/bin/bash - -export DYLD_LIBRARY_PATH=/Library/Developer/CommandLineTools/usr/lib -export SKIP_PALLET_REVIVE_FIXTURES=1 - -echo "🔨 Building Polkadot SDK binaries with runtime-benchmarks feature..." -echo - -# Build main polkadot binary with runtime-benchmarks -echo "📦 Building polkadot relay chain binary (with runtime-benchmarks)..." -cargo build --release -p polkadot --bin polkadot --features runtime-benchmarks -if [ $? -ne 0 ]; then - echo "❌ Failed to build polkadot binary" - exit 1 -fi -echo "✅ polkadot binary built successfully" -echo - -# Build PVF execute worker -echo "📦 Building polkadot-execute-worker..." 
-cargo build --release -p polkadot --bin polkadot-execute-worker -if [ $? -ne 0 ]; then - echo "❌ Failed to build polkadot-execute-worker" - exit 1 -fi -echo "✅ polkadot-execute-worker built successfully" -echo - -# Build PVF prepare worker -echo "📦 Building polkadot-prepare-worker..." -cargo build --release -p polkadot --bin polkadot-prepare-worker -if [ $? -ne 0 ]; then - echo "❌ Failed to build polkadot-prepare-worker" - exit 1 -fi -echo "✅ polkadot-prepare-worker built successfully" -echo - -# Build parachain binary with runtime-benchmarks -echo "📦 Building polkadot-parachain binary (with runtime-benchmarks)..." -cargo build --release -p polkadot-parachain-bin --bin polkadot-parachain --features runtime-benchmarks -if [ $? -ne 0 ]; then - echo "❌ Failed to build polkadot-parachain binary" - exit 1 -fi -echo "✅ polkadot-parachain binary built successfully" -echo - -echo "🎉 All binaries built successfully with runtime-benchmarks!" -echo -echo "📍 Binary locations:" -echo " - Relay chain: target/release/polkadot" -echo " - Execute worker: target/release/polkadot-execute-worker" -echo " - Prepare worker: target/release/polkadot-prepare-worker" -echo " - Parachain: target/release/polkadot-parachain" -echo -echo "🚀 Ready for benchmarking!" diff --git a/pubsub-dev/build.sh b/pubsub-dev/build.sh deleted file mode 100755 index dd968f1856fe1..0000000000000 --- a/pubsub-dev/build.sh +++ /dev/null @@ -1,56 +0,0 @@ -#!/bin/bash - -export DYLD_LIBRARY_PATH=/Library/Developer/CommandLineTools/usr/lib - -echo "🔨 Building Polkadot SDK binaries for pubsub XCM v5 testing..." -echo - -# Build main polkadot binary -echo "📦 Building polkadot relay chain binary..." -cargo build --release -p polkadot --bin polkadot -if [ $? -ne 0 ]; then - echo "❌ Failed to build polkadot binary" - exit 1 -fi -echo "✅ polkadot binary built successfully" -echo - -# Build PVF execute worker -echo "📦 Building polkadot-execute-worker..." 
-cargo build --release -p polkadot --bin polkadot-execute-worker -if [ $? -ne 0 ]; then - echo "❌ Failed to build polkadot-execute-worker" - exit 1 -fi -echo "✅ polkadot-execute-worker built successfully" -echo - -# Build PVF prepare worker -echo "📦 Building polkadot-prepare-worker..." -cargo build --release -p polkadot --bin polkadot-prepare-worker -if [ $? -ne 0 ]; then - echo "❌ Failed to build polkadot-prepare-worker" - exit 1 -fi -echo "✅ polkadot-prepare-worker built successfully" -echo - -# Build parachain binary -echo "📦 Building polkadot-parachain binary..." -cargo build --release -p polkadot-parachain-bin --bin polkadot-parachain -if [ $? -ne 0 ]; then - echo "❌ Failed to build polkadot-parachain binary" - exit 1 -fi -echo "✅ polkadot-parachain binary built successfully" -echo - -echo "🎉 All binaries built successfully!" -echo -echo "📍 Binary locations:" -echo " - Relay chain: target/release/polkadot" -echo " - Execute worker: target/release/polkadot-execute-worker" -echo " - Prepare worker: target/release/polkadot-prepare-worker" -echo " - Parachain: target/release/polkadot-parachain" -echo -echo "🚀 Ready for zombienet testing!" 
\ No newline at end of file diff --git a/pubsub-dev/zombienet.toml b/pubsub-dev/zombienet.toml deleted file mode 100644 index c803a5a7f8d5f..0000000000000 --- a/pubsub-dev/zombienet.toml +++ /dev/null @@ -1,42 +0,0 @@ -[relaychain] -chain = "rococo-local" -default_command = "./target/release/polkadot" -default_args = [ "-lparachain=debug", "-lxcm=trace" ] - - [[relaychain.nodes]] - name = "alice" - rpc_port = 9900 - validator = true - args = ["--trie-cache-size=0", "--disable-worker-version-check"] - - [[relaychain.nodes]] - name = "bob" - validator = true - args = ["--trie-cache-size=0", "--disable-worker-version-check"] - - [[relaychain.nodes]] - name = "charlie" - validator = true - args = ["--trie-cache-size=0", "--disable-worker-version-check"] - - [[relaychain.nodes]] - name = "dave" - validator = true - args = ["--trie-cache-size=0", "--disable-worker-version-check"] - -[[parachains]] -id = 1000 -addToGenesis = true -cumulus_based = true - - [[parachains.collators]] - name = "para-collator01" - rpc_port = 9920 - command = "./target/release/polkadot-parachain" - args = ["-lxcm=trace", "--trie-cache-size=0", "--network-backend=libp2p", "--", "--network-backend=libp2p"] - - [[parachains.collators]] - name = "para-collator02" - command = "./target/release/polkadot-parachain" - args = ["-lxcm=trace", "--trie-cache-size=0", "--network-backend=libp2p", "--", "--network-backend=libp2p"] - From b75cac4e8b4848c4f1e66e4a45fe86b2fa5a47fc Mon Sep 17 00:00:00 2001 From: metricaez Date: Tue, 16 Dec 2025 17:53:18 -0300 Subject: [PATCH 57/69] feat: max out publish on runtimes and unit weight for broadcaster --- Cargo.lock | 378 +++--------------- .../asset-hub-rococo/src/weights/xcm/mod.rs | 8 +- .../asset-hub-westend/src/weights/xcm/mod.rs | 8 +- .../bridge-hub-rococo/src/weights/xcm/mod.rs | 8 +- .../bridge-hub-westend/src/weights/xcm/mod.rs | 8 +- .../src/weights/xcm/mod.rs | 8 +- .../coretime-westend/src/weights/xcm/mod.rs | 8 +- .../people-westend/src/weights/xcm/mod.rs 
| 8 +- .../parachains/src/broadcaster/weights.rs | 11 +- .../runtime/rococo/src/weights/xcm/mod.rs | 5 +- .../xcm/pallet_xcm_benchmarks_generic.rs | 23 -- .../runtime/westend/src/weights/xcm/mod.rs | 8 +- 12 files changed, 83 insertions(+), 398 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f17d932389f66..7e8969262b741 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1193,36 +1193,6 @@ dependencies = [ "testnet-parachains-constants", ] -[[package]] -name = "asset-hub-rococo-integration-tests" -version = "1.0.0" -dependencies = [ - "assert_matches", - "asset-test-utils", - "cumulus-pallet-parachain-system", - "emulated-integration-tests-common", - "frame-support", - "frame-system", - "pallet-asset-conversion", - "pallet-asset-rewards", - "pallet-assets", - "pallet-balances", - "pallet-message-queue", - "pallet-treasury", - "pallet-utility", - "pallet-xcm", - "parachains-common", - "parity-scale-codec", - "polkadot-runtime-common", - "rococo-runtime-constants", - "rococo-system-emulated-network", - "sp-core 28.0.0", - "sp-runtime", - "staging-xcm", - "staging-xcm-executor", - "xcm-runtime-apis", -] - [[package]] name = "asset-hub-rococo-runtime" version = "0.11.0" @@ -1409,6 +1379,7 @@ dependencies = [ "pallet-indices", "pallet-message-queue", "pallet-migrations", + "pallet-multi-asset-bounties", "pallet-multisig", "pallet-nft-fractionalization", "pallet-nfts", @@ -2590,7 +2561,6 @@ dependencies = [ "pallet-xcm", "parachains-common", "parity-scale-codec", - "rococo-system-emulated-network", "rococo-westend-system-emulated-network", "scale-info", "snowbridge-inbound-queue-primitives", @@ -3764,104 +3734,6 @@ dependencies = [ "memchr", ] -[[package]] -name = "coretime-rococo-emulated-chain" -version = "0.1.0" -dependencies = [ - "coretime-rococo-runtime", - "cumulus-primitives-core", - "emulated-integration-tests-common", - "frame-support", - "parachains-common", - "sp-core 28.0.0", - "testnet-parachains-constants", -] - -[[package]] -name = 
"coretime-rococo-integration-tests" -version = "0.0.0" -dependencies = [ - "cumulus-pallet-parachain-system", - "emulated-integration-tests-common", - "frame-support", - "pallet-broker", - "pallet-message-queue", - "polkadot-runtime-parachains", - "rococo-runtime-constants", - "rococo-system-emulated-network", - "sp-runtime", - "staging-xcm", -] - -[[package]] -name = "coretime-rococo-runtime" -version = "0.1.0" -dependencies = [ - "cumulus-pallet-aura-ext", - "cumulus-pallet-parachain-system", - "cumulus-pallet-session-benchmarking", - "cumulus-pallet-weight-reclaim", - "cumulus-pallet-xcm", - "cumulus-pallet-xcmp-queue", - "cumulus-primitives-aura", - "cumulus-primitives-core", - "cumulus-primitives-utility", - "frame-benchmarking", - "frame-executive", - "frame-metadata-hash-extension", - "frame-support", - "frame-system", - "frame-system-benchmarking", - "frame-system-rpc-runtime-api", - "frame-try-runtime", - "pallet-aura", - "pallet-authorship", - "pallet-balances", - "pallet-broker", - "pallet-collator-selection", - "pallet-message-queue", - "pallet-multisig", - "pallet-proxy", - "pallet-session", - "pallet-sudo", - "pallet-timestamp", - "pallet-transaction-payment", - "pallet-transaction-payment-rpc-runtime-api", - "pallet-utility", - "pallet-xcm", - "pallet-xcm-benchmarks", - "parachains-common", - "parachains-runtimes-test-utils", - "parity-scale-codec", - "polkadot-parachain-primitives", - "polkadot-runtime-common", - "rococo-runtime-constants", - "scale-info", - "serde", - "serde_json", - "sp-api", - "sp-block-builder", - "sp-consensus-aura", - "sp-core 28.0.0", - "sp-genesis-builder", - "sp-inherents", - "sp-keyring", - "sp-offchain", - "sp-runtime", - "sp-session", - "sp-storage 19.0.0", - "sp-transaction-pool", - "sp-version", - "staging-parachain-info", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", - "substrate-wasm-builder", - "testnet-parachains-constants", - "tracing", - "xcm-runtime-apis", -] - [[package]] name = 
"coretime-westend-emulated-chain" version = "0.1.0" @@ -5151,6 +5023,9 @@ dependencies = [ "cumulus-primitives-core", "parity-scale-codec", "polkadot-primitives", + "proptest", + "sp-consensus-babe", + "sp-core 28.0.0", "sp-runtime", "sp-state-machine", "sp-trie", @@ -6810,7 +6685,7 @@ dependencies = [ "sp-version", "sp-wasm-interface 20.0.0", "substrate-test-runtime", - "subxt 0.43.0", + "subxt 0.43.1", "subxt-signer 0.43.0", "thiserror 1.0.65", "thousands", @@ -6850,7 +6725,7 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e56c0e51972d7b26ff76966c4d0f2307030df9daa5ce0885149ece1ab7ca5ad" dependencies = [ - "frame-metadata 23.0.0", + "frame-metadata 23.0.1", "parity-scale-codec", "scale-decode", "scale-info", @@ -6942,9 +6817,9 @@ dependencies = [ [[package]] name = "frame-metadata" -version = "23.0.0" +version = "23.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8c26fcb0454397c522c05fdad5380c4e622f8a875638af33bff5a320d1fc965" +checksum = "9ba5be0edbdb824843a0f9c6f0906ecfc66c5316218d74457003218b24909ed0" dependencies = [ "cfg-if", "parity-scale-codec", @@ -6959,7 +6834,7 @@ dependencies = [ "array-bytes 6.2.2", "const-hex", "docify", - "frame-metadata 23.0.0", + "frame-metadata 23.0.1", "frame-support", "frame-system", "log", @@ -7038,7 +6913,7 @@ dependencies = [ "bitflags 1.3.2", "docify", "environmental", - "frame-metadata 23.0.0", + "frame-metadata 23.0.1", "frame-support-procedural", "frame-system", "impl-trait-for-tuples", @@ -7127,7 +7002,7 @@ version = "3.0.0" dependencies = [ "frame-benchmarking", "frame-executive", - "frame-metadata 23.0.0", + "frame-metadata 23.0.1", "frame-support", "frame-support-test-pallet", "frame-system", @@ -10343,7 +10218,7 @@ checksum = "b3e3e3f549d27d2dc054372f320ddf68045a833fab490563ff70d4cf1b9d91ea" dependencies = [ "array-bytes 9.1.2", "blake3", - "frame-metadata 23.0.0", + "frame-metadata 23.0.1", 
"parity-scale-codec", "scale-decode", "scale-info", @@ -12572,7 +12447,7 @@ dependencies = [ name = "pallet-example-view-functions" version = "1.0.0" dependencies = [ - "frame-metadata 23.0.0", + "frame-metadata 23.0.1", "frame-support", "frame-system", "log", @@ -13390,7 +13265,7 @@ dependencies = [ "sp-weights", "sqlx", "substrate-prometheus-endpoint", - "subxt 0.43.0", + "subxt 0.43.1", "subxt-signer 0.43.0", "thiserror 1.0.65", "tokio", @@ -14444,8 +14319,10 @@ dependencies = [ "pallet-balances", "pallet-collator-selection", "pallet-message-queue", + "pallet-multi-asset-bounties", "pallet-treasury", "pallet-xcm", + "parachains-common-types", "parity-scale-codec", "polkadot-primitives", "polkadot-runtime-common", @@ -14460,6 +14337,15 @@ dependencies = [ "tracing", ] +[[package]] +name = "parachains-common-types" +version = "0.1.0" +dependencies = [ + "sp-consensus-aura", + "sp-core 28.0.0", + "sp-runtime", +] + [[package]] name = "parachains-relay" version = "0.1.0" @@ -14769,103 +14655,6 @@ dependencies = [ "xcm-runtime-apis", ] -[[package]] -name = "people-rococo-emulated-chain" -version = "0.1.0" -dependencies = [ - "cumulus-primitives-core", - "emulated-integration-tests-common", - "frame-support", - "parachains-common", - "people-rococo-runtime", - "sp-core 28.0.0", - "testnet-parachains-constants", -] - -[[package]] -name = "people-rococo-integration-tests" -version = "0.1.0" -dependencies = [ - "asset-test-utils", - "emulated-integration-tests-common", - "frame-support", - "pallet-balances", - "parachains-common", - "rococo-system-emulated-network", - "sp-runtime", - "staging-xcm", - "staging-xcm-executor", -] - -[[package]] -name = "people-rococo-runtime" -version = "0.1.0" -dependencies = [ - "cumulus-pallet-aura-ext", - "cumulus-pallet-parachain-system", - "cumulus-pallet-session-benchmarking", - "cumulus-pallet-weight-reclaim", - "cumulus-pallet-xcm", - "cumulus-pallet-xcmp-queue", - "cumulus-primitives-aura", - "cumulus-primitives-core", - 
"cumulus-primitives-utility", - "enumflags2", - "frame-benchmarking", - "frame-executive", - "frame-support", - "frame-system", - "frame-system-benchmarking", - "frame-system-rpc-runtime-api", - "frame-try-runtime", - "pallet-aura", - "pallet-authorship", - "pallet-balances", - "pallet-collator-selection", - "pallet-identity", - "pallet-message-queue", - "pallet-migrations", - "pallet-multisig", - "pallet-proxy", - "pallet-session", - "pallet-timestamp", - "pallet-transaction-payment", - "pallet-transaction-payment-rpc-runtime-api", - "pallet-utility", - "pallet-xcm", - "pallet-xcm-benchmarks", - "parachains-common", - "parachains-runtimes-test-utils", - "parity-scale-codec", - "polkadot-parachain-primitives", - "polkadot-runtime-common", - "rococo-runtime-constants", - "scale-info", - "serde", - "serde_json", - "sp-api", - "sp-block-builder", - "sp-consensus-aura", - "sp-core 28.0.0", - "sp-genesis-builder", - "sp-inherents", - "sp-keyring", - "sp-offchain", - "sp-runtime", - "sp-session", - "sp-storage 19.0.0", - "sp-transaction-pool", - "sp-version", - "staging-parachain-info", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", - "substrate-wasm-builder", - "testnet-parachains-constants", - "tracing", - "xcm-runtime-apis", -] - [[package]] name = "people-westend-emulated-chain" version = "0.1.0" @@ -16152,7 +15941,7 @@ dependencies = [ "pallet-transaction-payment", "pallet-transaction-payment-rpc", "pallet-transaction-payment-rpc-runtime-api", - "parachains-common", + "parachains-common-types", "parity-scale-codec", "polkadot-cli", "polkadot-primitives", @@ -16245,7 +16034,6 @@ dependencies = [ "bridge-hub-westend-runtime", "collectives-westend-runtime", "color-eyre", - "coretime-rococo-runtime", "coretime-westend-runtime", "cumulus-client-consensus-aura", "cumulus-primitives-core", @@ -16254,10 +16042,8 @@ dependencies = [ "log", "parachains-common", "penpal-runtime", - "people-rococo-runtime", "people-westend-runtime", 
"polkadot-omni-node-lib", - "rococo-parachain-runtime", "sc-chain-spec", "sc-cli", "sc-service", @@ -16688,6 +16474,7 @@ dependencies = [ "pallet-xcm-bridge-hub-router", "pallet-xcm-precompiles", "parachains-common", + "parachains-common-types", "parachains-runtimes-test-utils", "polkadot-approval-distribution", "polkadot-availability-bitfield-distribution", @@ -17437,7 +17224,7 @@ dependencies = [ "sp-core 28.0.0", "sp-io", "substrate-build-script-utils", - "subxt 0.43.0", + "subxt 0.43.1", "tokio", "tokio-util", "zombienet-orchestrator", @@ -19084,58 +18871,6 @@ dependencies = [ "sp-keyring", ] -[[package]] -name = "rococo-parachain-runtime" -version = "0.6.0" -dependencies = [ - "cumulus-pallet-aura-ext", - "cumulus-pallet-parachain-system", - "cumulus-pallet-weight-reclaim", - "cumulus-pallet-xcm", - "cumulus-pallet-xcmp-queue", - "cumulus-ping", - "cumulus-primitives-aura", - "cumulus-primitives-core", - "cumulus-primitives-utility", - "frame-benchmarking", - "frame-executive", - "frame-support", - "frame-system", - "frame-system-rpc-runtime-api", - "pallet-assets", - "pallet-aura", - "pallet-balances", - "pallet-message-queue", - "pallet-sudo", - "pallet-timestamp", - "pallet-transaction-payment", - "pallet-transaction-payment-rpc-runtime-api", - "pallet-xcm", - "parachains-common", - "parity-scale-codec", - "polkadot-parachain-primitives", - "polkadot-runtime-common", - "scale-info", - "serde_json", - "sp-api", - "sp-block-builder", - "sp-consensus-aura", - "sp-core 28.0.0", - "sp-genesis-builder", - "sp-inherents", - "sp-keyring", - "sp-offchain", - "sp-runtime", - "sp-session", - "sp-transaction-pool", - "sp-version", - "staging-parachain-info", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", - "substrate-wasm-builder", -] - [[package]] name = "rococo-runtime" version = "7.0.0" @@ -19251,19 +18986,6 @@ dependencies = [ "staging-xcm-builder", ] -[[package]] -name = "rococo-system-emulated-network" -version = "0.0.0" -dependencies = [ - 
"asset-hub-rococo-emulated-chain", - "bridge-hub-rococo-emulated-chain", - "coretime-rococo-emulated-chain", - "emulated-integration-tests-common", - "penpal-emulated-chain", - "people-rococo-emulated-chain", - "rococo-emulated-chain", -] - [[package]] name = "rococo-westend-system-emulated-network" version = "0.0.0" @@ -20624,7 +20346,6 @@ dependencies = [ "sp-arithmetic", "sp-blockchain", "sp-consensus", - "sp-consensus-grandpa", "sp-core 28.0.0", "sp-runtime", "sp-test-primitives", @@ -20909,7 +20630,7 @@ dependencies = [ "sp-state-machine", "sp-version", "sp-wasm-interface 20.0.0", - "subxt 0.43.0", + "subxt 0.43.1", "thiserror 1.0.65", ] @@ -21185,6 +20906,7 @@ dependencies = [ "sp-runtime", "sp-tracing 16.0.0", "sp-transaction-pool", + "strum 0.26.3", "substrate-prometheus-endpoint", "substrate-test-runtime", "substrate-test-runtime-client", @@ -21213,6 +20935,7 @@ dependencies = [ "sp-blockchain", "sp-core 28.0.0", "sp-runtime", + "strum 0.26.3", "thiserror 1.0.65", ] @@ -22341,6 +22064,7 @@ dependencies = [ "frame-support", "frame-system", "hex-literal", + "impl-trait-for-tuples", "parity-scale-codec", "scale-info", "snowbridge-beacon-primitives", @@ -23358,6 +23082,7 @@ dependencies = [ name = "sp-debug-derive" version = "14.0.0" dependencies = [ + "proc-macro-warning", "proc-macro2 1.0.95", "quote 1.0.40", "syn 2.0.98", @@ -23474,7 +23199,7 @@ dependencies = [ name = "sp-metadata-ir" version = "0.6.0" dependencies = [ - "frame-metadata 23.0.0", + "frame-metadata 23.0.1", "parity-scale-codec", "scale-info", ] @@ -23587,6 +23312,7 @@ dependencies = [ "sp-tracing 16.0.0", "sp-trie", "sp-weights", + "strum 0.26.3", "substrate-test-runtime-client", "tracing", "tuplex", @@ -24870,7 +24596,7 @@ dependencies = [ "cargo_metadata", "console", "filetime", - "frame-metadata 23.0.0", + "frame-metadata 23.0.1", "jobserver", "merkleized-metadata", "parity-scale-codec", @@ -24897,7 +24623,7 @@ dependencies = [ "anyhow", "env_logger 0.11.3", "log", - "subxt 0.43.0", + 
"subxt 0.43.1", "subxt-signer 0.43.0", "tokio", "zombienet-configuration", @@ -24962,14 +24688,14 @@ dependencies = [ [[package]] name = "subxt" -version = "0.43.0" +version = "0.43.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74791ddeaaa6de42e7cc8a715c83eb73303f513f90af701fd07eb2caad92ed84" +checksum = "f8c6dc0f90e23c521465b8f7e026af04a48cc6f00c51d88a8d313d33096149de" dependencies = [ "async-trait", "derive-where", "either", - "frame-metadata 23.0.0", + "frame-metadata 23.0.1", "futures", "hex", "jsonrpsee", @@ -24985,7 +24711,7 @@ dependencies = [ "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "subxt-core 0.43.0", "subxt-lightclient 0.43.0", - "subxt-macro 0.43.0", + "subxt-macro 0.43.1", "subxt-metadata 0.43.0", "subxt-rpcs 0.43.0", "thiserror 2.0.12", @@ -25071,7 +24797,7 @@ dependencies = [ "blake2 0.10.6", "derive-where", "frame-decode 0.8.3", - "frame-metadata 23.0.0", + "frame-metadata 23.0.1", "hashbrown 0.14.5", "hex", "impl-serde", @@ -25143,9 +24869,9 @@ dependencies = [ [[package]] name = "subxt-macro" -version = "0.43.0" +version = "0.43.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69516e8ff0e9340a0f21b8398da7f997571af4734ee81deada5150a2668c8443" +checksum = "c269228a2e5de4c0c61ed872b701967ee761df0f167d5b91ecec1185bca65793" dependencies = [ "darling 0.20.10", "parity-scale-codec", @@ -25180,7 +24906,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c134068711c0c46906abc0e6e4911204420331530738e18ca903a5469364d9f" dependencies = [ "frame-decode 0.8.3", - "frame-metadata 23.0.0", + "frame-metadata 23.0.1", "hashbrown 0.14.5", "parity-scale-codec", "scale-info", @@ -25220,7 +24946,7 @@ checksum = "25de7727144780d780a6a7d78bbfd28414b8adbab68b05e87329c367d7705be4" dependencies = [ "derive-where", "finito", - "frame-metadata 23.0.0", + "frame-metadata 
23.0.1", "futures", "hex", "impl-serde", @@ -26368,9 +26094,9 @@ dependencies = [ [[package]] name = "trie-bench" -version = "0.42.0" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "972be214c558b1a5550d34c8c7e55a284f6439cefc51226d6ffbfc152de5cc58" +checksum = "a03bee4700c5dd6b2ceba5e4e4d5a7017704a761481824d3033d223f9660973a" dependencies = [ "criterion", "hash-db", @@ -26384,9 +26110,9 @@ dependencies = [ [[package]] name = "trie-db" -version = "0.30.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c0670ab45a6b7002c7df369fee950a27cf29ae0474343fd3a15aa15f691e7a6" +checksum = "a7795f2df2ef744e4ffb2125f09325e60a21d305cc3ecece0adeef03f7a9e560" dependencies = [ "hash-db", "log", @@ -28722,7 +28448,7 @@ dependencies = [ "serde_json", "sha2 0.10.9", "sp-core 36.1.0", - "subxt 0.43.0", + "subxt 0.43.1", "subxt-signer 0.43.0", "thiserror 1.0.65", "tokio", @@ -28785,7 +28511,7 @@ dependencies = [ "async-trait", "futures", "lazy_static", - "subxt 0.43.0", + "subxt 0.43.1", "subxt-signer 0.43.0", "tokio", "zombienet-configuration", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/mod.rs index caed2e572535b..f2a13347eaac0 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/mod.rs @@ -271,10 +271,8 @@ impl XcmWeightInfo for AssetHubRococoXcmWeight { fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { XcmGeneric::::execute_with_origin() } - fn publish(data: &PublishData) -> Weight { - // TODO: Generate proper weights via benchmarking - // For now, use a conservative estimate: base weight + per-item weight - Weight::from_parts(10_000_000, 0) - .saturating_add(Weight::from_parts(1_000_000, 0).saturating_mul(data.len() as u64)) + 
fn publish(_data: &PublishData) -> Weight { + // AssetHubRococo does not currently support Publish operations + Weight::MAX } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/mod.rs index aec703f6641b1..ad08905dd60e5 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/mod.rs @@ -302,10 +302,8 @@ impl XcmWeightInfo for AssetHubWestendXcmWeight { fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { XcmGeneric::::execute_with_origin() } - fn publish(data: &PublishData) -> Weight { - // TODO: Generate proper weights via benchmarking - // For now, use a conservative estimate: base weight + per-item weight - Weight::from_parts(10_000_000, 0) - .saturating_add(Weight::from_parts(1_000_000, 0).saturating_mul(data.len() as u64)) + fn publish(_data: &PublishData) -> Weight { + // AssetHubWestend does not currently support Publish operations + Weight::MAX } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/mod.rs index 708ca3814932c..505da0d89d915 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/mod.rs @@ -272,10 +272,8 @@ impl XcmWeightInfo for BridgeHubRococoXcmWeight { fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { XcmGeneric::::execute_with_origin() } - fn publish(data: &PublishData) -> Weight { - // TODO: Generate proper weights via benchmarking - // For now, use a conservative estimate: base weight + per-item weight - Weight::from_parts(10_000_000, 0) - .saturating_add(Weight::from_parts(1_000_000, 0).saturating_mul(data.len() as u64)) + fn publish(_data: &PublishData) -> Weight { + // 
BridgeHubRococo does not currently support Publish operations + Weight::MAX } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/mod.rs index 69aa5aea19eb7..8857f83b53a06 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/mod.rs @@ -272,10 +272,8 @@ impl XcmWeightInfo for BridgeHubWestendXcmWeight { fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { XcmGeneric::::execute_with_origin() } - fn publish(data: &PublishData) -> Weight { - // TODO: Generate proper weights via benchmarking - // For now, use a conservative estimate: base weight + per-item weight - Weight::from_parts(10_000_000, 0) - .saturating_add(Weight::from_parts(1_000_000, 0).saturating_mul(data.len() as u64)) + fn publish(_data: &PublishData) -> Weight { + // BridgeHubWestend does not currently support Publish operations + Weight::MAX } } diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/mod.rs index 00a7f5631f51d..85e4268db2213 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/xcm/mod.rs @@ -270,10 +270,8 @@ impl XcmWeightInfo for CollectivesWestendXcmWeight { fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { XcmGeneric::::execute_with_origin() } - fn publish(data: &PublishData) -> Weight { - // TODO: Generate proper weights via benchmarking - // For now, use a conservative estimate: base weight + per-item weight - Weight::from_parts(10_000_000, 0) - .saturating_add(Weight::from_parts(1_000_000, 0).saturating_mul(data.len() as u64)) + fn publish(_data: &PublishData) -> Weight { + // CollectivesWestend 
does not currently support Publish operations + Weight::MAX } } diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/mod.rs index da47e67b56a8e..7f21c91ee4a83 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/mod.rs @@ -269,10 +269,8 @@ impl XcmWeightInfo for CoretimeWestendXcmWeight { fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { XcmGeneric::::execute_with_origin() } - fn publish(data: &PublishData) -> Weight { - // TODO: Generate proper weights via benchmarking - // For now, use a conservative estimate: base weight + per-item weight - Weight::from_parts(10_000_000, 0) - .saturating_add(Weight::from_parts(1_000_000, 0).saturating_mul(data.len() as u64)) + fn publish(_data: &PublishData) -> Weight { + // CoretimeWestend does not currently support Publish operations + Weight::MAX } } diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/mod.rs index 431ff33f918f3..f9206b53b1781 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/mod.rs @@ -268,10 +268,8 @@ impl XcmWeightInfo for PeopleWestendXcmWeight { fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { XcmGeneric::::execute_with_origin() } - fn publish(data: &PublishData) -> Weight { - // TODO: Generate proper weights via benchmarking - // For now, use a conservative estimate: base weight + per-item weight - Weight::from_parts(10_000_000, 0) - .saturating_add(Weight::from_parts(1_000_000, 0).saturating_mul(data.len() as u64)) + fn publish(_data: &PublishData) -> Weight { + // PeopleWestend does not currently support Publish operations + Weight::MAX } } diff --git 
a/polkadot/runtime/parachains/src/broadcaster/weights.rs b/polkadot/runtime/parachains/src/broadcaster/weights.rs index 65b82bf29dddf..3586a929caa4b 100644 --- a/polkadot/runtime/parachains/src/broadcaster/weights.rs +++ b/polkadot/runtime/parachains/src/broadcaster/weights.rs @@ -31,17 +31,14 @@ pub trait WeightInfo { /// Placeholder weights (to be replaced with benchmarked values). impl WeightInfo for () { fn register_publisher() -> Weight { - Weight::from_parts(20_000_000, 0) - .saturating_add(Weight::from_parts(0, 3000)) + Weight::zero() } fn force_register_publisher() -> Weight { - Weight::from_parts(15_000_000, 0) - .saturating_add(Weight::from_parts(0, 3000)) + Weight::zero() } - fn do_cleanup_publisher(k: u32) -> Weight { - Weight::from_parts(10_000_000, 0) - .saturating_add(Weight::from_parts(5_000_000, 0).saturating_mul(k.into())) + fn do_cleanup_publisher(_k: u32) -> Weight { + Weight::zero() } } diff --git a/polkadot/runtime/rococo/src/weights/xcm/mod.rs b/polkadot/runtime/rococo/src/weights/xcm/mod.rs index 31527c53ac640..ab9f5deed2ba8 100644 --- a/polkadot/runtime/rococo/src/weights/xcm/mod.rs +++ b/polkadot/runtime/rococo/src/weights/xcm/mod.rs @@ -305,8 +305,9 @@ impl XcmWeightInfo for RococoXcmWeight { fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { XcmGeneric::::execute_with_origin() } - fn publish(data: &PublishData) -> Weight { - XcmGeneric::::publish(data.len() as u32) + fn publish(_data: &PublishData) -> Weight { + // Rococo does not currently support Publish operations + Weight::MAX } } diff --git a/polkadot/runtime/rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/polkadot/runtime/rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index 9d0380df809a2..4268ce5612f52 100644 --- a/polkadot/runtime/rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/polkadot/runtime/rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -349,27 +349,4 @@ impl WeightInfo { // Minimum execution time: 766_000 picoseconds. 
Weight::from_parts(807_000, 0) } - /// Storage: `Broadcaster::RegisteredPublishers` (r:1 w:0) - /// Proof: `Broadcaster::RegisteredPublishers` (`max_values`: None, `max_size`: Some(60), added: 2535, mode: `MaxEncodedLen`) - /// Storage: `Broadcaster::PublishedKeys` (r:1 w:1) - /// Proof: `Broadcaster::PublishedKeys` (`max_values`: None, `max_size`: Some(1613), added: 4088, mode: `MaxEncodedLen`) - /// Storage: `Broadcaster::TotalStorageSize` (r:1 w:1) - /// Proof: `Broadcaster::TotalStorageSize` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) - /// Storage: `Broadcaster::PublisherExists` (r:1 w:1) - /// Proof: `Broadcaster::PublisherExists` (`max_values`: None, `max_size`: Some(13), added: 2488, mode: `MaxEncodedLen`) - /// The range of component `n` is `[1, 10]`. - pub(crate) fn publish(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `290` - // Estimated: `5078 + n * (372 ±0)` - // Minimum execution time: 33_000_000 picoseconds. - Weight::from_parts(55_703_232, 5078) - // Standard Error: 335_228 - .saturating_add(Weight::from_parts(5_762_702, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes(3)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 372).saturating_mul(n.into())) - } } diff --git a/polkadot/runtime/westend/src/weights/xcm/mod.rs b/polkadot/runtime/westend/src/weights/xcm/mod.rs index 46fdbab20df3d..dc00bd4c868e6 100644 --- a/polkadot/runtime/westend/src/weights/xcm/mod.rs +++ b/polkadot/runtime/westend/src/weights/xcm/mod.rs @@ -307,11 +307,9 @@ impl XcmWeightInfo for WestendXcmWeight { fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { XcmGeneric::::execute_with_origin() } - fn publish(data: &PublishData) -> Weight { - // TODO: Generate proper weights via benchmarking - // For 
now, use a conservative estimate: base weight + per-item weight - Weight::from_parts(10_000_000, 0) - .saturating_add(Weight::from_parts(1_000_000, 0).saturating_mul(data.len() as u64)) + fn publish(_data: &PublishData) -> Weight { + // Westend does not currently support Publish operations + Weight::MAX } } From 89aba2bf07a2dcb569ee2cacdb8c33366a10a668 Mon Sep 17 00:00:00 2001 From: metricaez Date: Tue, 16 Dec 2025 20:07:46 -0300 Subject: [PATCH 58/69] feat: refund weight --- polkadot/runtime/parachains/src/broadcaster/mod.rs | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/polkadot/runtime/parachains/src/broadcaster/mod.rs b/polkadot/runtime/parachains/src/broadcaster/mod.rs index bd0f35adff19c..d8edfe5ee9fd5 100644 --- a/polkadot/runtime/parachains/src/broadcaster/mod.rs +++ b/polkadot/runtime/parachains/src/broadcaster/mod.rs @@ -70,7 +70,7 @@ use frame_support::{ use frame_system::{ensure_root, ensure_signed, pallet_prelude::BlockNumberFor}; use polkadot_primitives::Id as ParaId; use scale_info::TypeInfo; -use sp_runtime::{traits::Zero, RuntimeDebug}; +use sp_runtime::traits::Zero; pub use pallet::*; @@ -87,7 +87,7 @@ mod benchmarking; mod tests; /// Information about a registered publisher. -#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] +#[derive(Encode, Decode, Clone, PartialEq, Eq, Debug, TypeInfo, MaxEncodedLen)] pub struct PublisherInfo { /// The account that registered and manages this publisher. 
pub manager: AccountId, @@ -346,7 +346,7 @@ pub mod pallet { pub fn cleanup_published_data( origin: OriginFor, para_id: ParaId, - ) -> DispatchResult { + ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; let info = RegisteredPublishers::::get(para_id) @@ -355,10 +355,15 @@ pub mod pallet { ensure!(who == info.manager, Error::::NotAuthorized); ensure!(PublisherExists::::get(para_id), Error::::NoDataToCleanup); + let actual_keys = PublishedKeys::::get(para_id).len() as u32; Self::do_cleanup_publisher(para_id)?; Self::deposit_event(Event::DataCleanedUp { para_id }); - Ok(()) + + Ok(Some( + ::WeightInfo::do_cleanup_publisher(actual_keys) + .saturating_add(T::DbWeight::get().reads(2)) + ).into()) } /// Deregister a publisher and release their deposit. From 19202a19f728f7933c93817d42b54177a586852f Mon Sep 17 00:00:00 2001 From: metricaez Date: Tue, 16 Dec 2025 21:11:09 -0300 Subject: [PATCH 59/69] feat: KeyToIncludeInRelayProofApi --- cumulus/primitives/core/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/cumulus/primitives/core/src/lib.rs b/cumulus/primitives/core/src/lib.rs index 58dc02fd75279..d4dc8ccb8dc4e 100644 --- a/cumulus/primitives/core/src/lib.rs +++ b/cumulus/primitives/core/src/lib.rs @@ -476,6 +476,7 @@ pub enum RelayStorageKey { /// Unprefixed storage key identifying the child trie root location. /// Prefix `:child_storage:default:` is added when accessing storage. /// Used to derive `ChildInfo` for reading child trie data. + /// Usage: let child_info = ChildInfo::new_default(&storage_key); storage_key: Vec, /// Key within the child trie. 
key: Vec, From f615241bf970cf8220b8ac3459836ef9d4f31ba2 Mon Sep 17 00:00:00 2001 From: metricaez Date: Tue, 16 Dec 2025 22:50:21 -0300 Subject: [PATCH 60/69] choir: shorter imports --- cumulus/client/consensus/aura/src/collator.rs | 5 +++-- cumulus/client/consensus/aura/src/collators/mod.rs | 4 ++-- cumulus/client/parachain-inherent/src/lib.rs | 12 +++++------- .../relay-chain-inprocess-interface/src/lib.rs | 6 ++++-- cumulus/client/relay-chain-rpc-interface/src/lib.rs | 9 ++++----- .../lib/src/fake_runtime_api/utils.rs | 4 ++-- cumulus/test/runtime/src/lib.rs | 4 ++-- 7 files changed, 22 insertions(+), 22 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collator.rs b/cumulus/client/consensus/aura/src/collator.rs index 2fab2f7e424d9..9a7cdadba5b36 100644 --- a/cumulus/client/consensus/aura/src/collator.rs +++ b/cumulus/client/consensus/aura/src/collator.rs @@ -33,6 +33,7 @@ use cumulus_client_consensus_common::{ use cumulus_client_parachain_inherent::{ParachainInherentData, ParachainInherentDataProvider}; use cumulus_primitives_core::{ relay_chain::Hash as PHash, DigestItem, ParachainBlockData, PersistedValidationData, + RelayProofRequest, }; use cumulus_relay_chain_interface::RelayChainInterface; use sc_client_api::BackendTransaction; @@ -177,7 +178,7 @@ where parent_hash: Block::Hash, timestamp: impl Into>, relay_parent_descendants: Option, - relay_proof_request: cumulus_primitives_core::RelayProofRequest, + relay_proof_request: RelayProofRequest, collator_peer_id: PeerId, ) -> Result<(ParachainInherentData, InherentData), Box> { let paras_inherent_data = ParachainInherentDataProvider::create_at( @@ -226,7 +227,7 @@ where validation_data: &PersistedValidationData, parent_hash: Block::Hash, timestamp: impl Into>, - relay_proof_request: cumulus_primitives_core::RelayProofRequest, + relay_proof_request: RelayProofRequest, collator_peer_id: PeerId, ) -> Result<(ParachainInherentData, InherentData), Box> { self.create_inherent_data_with_rp_offset( diff 
--git a/cumulus/client/consensus/aura/src/collators/mod.rs b/cumulus/client/consensus/aura/src/collators/mod.rs index 0b2981691fc24..5fcb0bee7bf63 100644 --- a/cumulus/client/consensus/aura/src/collators/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/mod.rs @@ -26,7 +26,7 @@ use codec::Codec; use cumulus_client_consensus_common::{self as consensus_common, ParentSearchParams}; use cumulus_primitives_aura::{AuraUnincludedSegmentApi, Slot}; use cumulus_primitives_core::{ - relay_chain::Header as RelayHeader, BlockT, KeyToIncludeInRelayProofApi, + relay_chain::Header as RelayHeader, BlockT, KeyToIncludeInRelayProofApi, RelayProofRequest, }; use cumulus_relay_chain_interface::{OverseerHandle, RelayChainInterface}; use polkadot_node_subsystem::messages::{CollatorProtocolMessage, RuntimeApiRequest}; @@ -673,7 +673,7 @@ mod tests { fn get_relay_proof_request( client: &Client, parent_hash: Block::Hash, -) -> cumulus_primitives_core::RelayProofRequest +) -> RelayProofRequest where Block: BlockT, Client: ProvideRuntimeApi, diff --git a/cumulus/client/parachain-inherent/src/lib.rs b/cumulus/client/parachain-inherent/src/lib.rs index 8ea3c4f96c5ae..b002b5c1fa59f 100644 --- a/cumulus/client/parachain-inherent/src/lib.rs +++ b/cumulus/client/parachain-inherent/src/lib.rs @@ -24,7 +24,7 @@ use cumulus_primitives_core::{ self, ApprovedPeerId, Block as RelayBlock, Hash as PHash, Header as RelayHeader, HrmpChannelId, }, - ParaId, PersistedValidationData, + ParaId, PersistedValidationData, RelayProofRequest, RelayStorageKey, }; pub use cumulus_primitives_parachain_inherent::{ParachainInherentData, INHERENT_IDENTIFIER}; use cumulus_relay_chain_interface::RelayChainInterface; @@ -44,7 +44,7 @@ async fn collect_relay_storage_proof( include_authorities: bool, include_next_authorities: bool, additional_relay_state_keys: Vec>, -) -> Option { +) -> Option { use relay_chain::well_known_keys as relay_well_known_keys; let ingress_channels = relay_chain_interface @@ -169,11 +169,9 @@ 
async fn collect_relay_storage_proof( async fn collect_relay_storage_proofs( relay_chain_interface: &impl RelayChainInterface, relay_parent: PHash, - relay_proof_request: cumulus_primitives_core::RelayProofRequest, + relay_proof_request: RelayProofRequest, ) -> Option { - use cumulus_primitives_core::RelayStorageKey; - - let cumulus_primitives_core::RelayProofRequest { keys } = relay_proof_request; + let RelayProofRequest { keys } = relay_proof_request; if keys.is_empty() { return None; @@ -250,7 +248,7 @@ impl ParachainInherentDataProvider { para_id: ParaId, relay_parent_descendants: Vec, additional_relay_state_keys: Vec>, - relay_proof_request: cumulus_primitives_core::RelayProofRequest, + relay_proof_request: RelayProofRequest, collator_peer_id: PeerId, ) -> Option { let collator_peer_id = ApprovedPeerId::try_from(collator_peer_id.to_bytes()) diff --git a/cumulus/client/relay-chain-inprocess-interface/src/lib.rs b/cumulus/client/relay-chain-inprocess-interface/src/lib.rs index f7b3f810b6015..02726367b7fe5 100644 --- a/cumulus/client/relay-chain-inprocess-interface/src/lib.rs +++ b/cumulus/client/relay-chain-inprocess-interface/src/lib.rs @@ -33,7 +33,9 @@ use cumulus_primitives_core::{ }, InboundDownwardMessage, ParaId, PersistedValidationData, }; -use cumulus_relay_chain_interface::{RelayChainError, RelayChainInterface, RelayChainResult}; +use cumulus_relay_chain_interface::{ + ChildInfo, RelayChainError, RelayChainInterface, RelayChainResult, +}; use futures::{FutureExt, Stream, StreamExt}; use polkadot_primitives::CandidateEvent; use polkadot_service::{ @@ -243,7 +245,7 @@ impl RelayChainInterface for RelayChainInProcessInterface { async fn prove_child_read( &self, relay_parent: PHash, - child_info: &cumulus_relay_chain_interface::ChildInfo, + child_info: &ChildInfo, child_keys: &[Vec], ) -> RelayChainResult { let state_backend = self.backend.state_at(relay_parent, TrieCacheContext::Untrusted)?; diff --git a/cumulus/client/relay-chain-rpc-interface/src/lib.rs 
b/cumulus/client/relay-chain-rpc-interface/src/lib.rs index 9c7732e6e452e..7960444060e38 100644 --- a/cumulus/client/relay-chain-rpc-interface/src/lib.rs +++ b/cumulus/client/relay-chain-rpc-interface/src/lib.rs @@ -26,7 +26,8 @@ use cumulus_primitives_core::{ InboundDownwardMessage, ParaId, PersistedValidationData, }; use cumulus_relay_chain_interface::{ - BlockNumber, CoreState, PHeader, RelayChainError, RelayChainInterface, RelayChainResult, + BlockNumber, ChildInfo, CoreIndex, CoreState, PHeader, RelayChainError, RelayChainInterface, + RelayChainResult, }; use futures::{FutureExt, Stream, StreamExt}; use polkadot_overseer::Handle; @@ -213,7 +214,7 @@ impl RelayChainInterface for RelayChainRpcInterface { async fn prove_child_read( &self, _relay_parent: RelayHash, - _child_info: &cumulus_relay_chain_interface::ChildInfo, + _child_info: &ChildInfo, _child_keys: &[Vec], ) -> RelayChainResult { // Not implemented: requires relay chain RPC to expose child trie proof method. @@ -287,9 +288,7 @@ impl RelayChainInterface for RelayChainRpcInterface { async fn claim_queue( &self, relay_parent: RelayHash, - ) -> RelayChainResult< - BTreeMap>, - > { + ) -> RelayChainResult>> { self.rpc_client.parachain_host_claim_queue(relay_parent).await } diff --git a/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs b/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs index 9d60924f55b84..ab0a9e1826739 100644 --- a/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs +++ b/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs @@ -15,7 +15,7 @@ // limitations under the License. pub(crate) mod imports { - pub use cumulus_primitives_core::ParaId; + pub use cumulus_primitives_core::{ParaId, RelayProofRequest}; pub use parachains_common_types::{AccountId, Balance, Nonce}; pub use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; pub use sp_runtime::{ @@ -177,7 +177,7 @@ macro_rules! 
impl_node_runtime_apis { } impl cumulus_primitives_core::KeyToIncludeInRelayProofApi<$block> for $runtime { - fn keys_to_prove() -> cumulus_primitives_core::RelayProofRequest { + fn keys_to_prove() -> RelayProofRequest { unimplemented!() } } diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index c20113aa69b24..1492ef02b8370 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -87,7 +87,7 @@ use sp_runtime::{ use sp_version::NativeVersion; use sp_version::RuntimeVersion; -use cumulus_primitives_core::ParaId; +use cumulus_primitives_core::{ParaId, RelayProofRequest}; // A few exports that help ease life for downstream crates. pub use frame_support::{ @@ -644,7 +644,7 @@ impl_runtime_apis! { } impl cumulus_primitives_core::KeyToIncludeInRelayProofApi for Runtime { - fn keys_to_prove() -> cumulus_primitives_core::RelayProofRequest { + fn keys_to_prove() -> RelayProofRequest { Default::default() } } From 74112e03dddaf656389a4b8db39d1e94d7662045 Mon Sep 17 00:00:00 2001 From: metricaez Date: Wed, 17 Dec 2025 08:51:39 -0300 Subject: [PATCH 61/69] feat: better naming and batch merging of proofs --- cumulus/client/parachain-inherent/src/lib.rs | 25 +++++++++++--------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/cumulus/client/parachain-inherent/src/lib.rs b/cumulus/client/parachain-inherent/src/lib.rs index b002b5c1fa59f..093bc6990245e 100644 --- a/cumulus/client/parachain-inherent/src/lib.rs +++ b/cumulus/client/parachain-inherent/src/lib.rs @@ -159,14 +159,14 @@ async fn collect_relay_storage_proof( .ok() } -/// Collect storage proofs for relay chain data. +/// Collect additional storage proofs requested by the runtime. /// /// Generates proofs for both top-level relay chain storage and child trie data. /// Top-level keys are proven directly. Child trie roots are automatically included /// from their standard storage locations (`:child_storage:default:` + identifier). 
/// /// Returns a merged proof combining all requested data, or `None` if there are no requests. -async fn collect_relay_storage_proofs( +async fn collect_additional_storage_proofs( relay_chain_interface: &impl RelayChainInterface, relay_parent: PHash, relay_proof_request: RelayProofRequest, @@ -177,8 +177,6 @@ async fn collect_relay_storage_proofs( return None; } - let mut combined_proof: Option = None; - // Group keys by storage type let mut top_keys = Vec::new(); let mut child_keys: std::collections::BTreeMap, Vec>> = @@ -193,11 +191,14 @@ async fn collect_relay_storage_proofs( } } + // Collect all storage proofs + let mut all_proofs = Vec::new(); + // Collect top-level storage proofs if !top_keys.is_empty() { match relay_chain_interface.prove_read(relay_parent, &top_keys).await { Ok(top_proof) => { - combined_proof = Some(top_proof); + all_proofs.push(top_proof); }, Err(e) => { tracing::error!( @@ -215,10 +216,7 @@ async fn collect_relay_storage_proofs( let child_info = ChildInfo::new_default(&storage_key); match relay_chain_interface.prove_child_read(relay_parent, &child_info, &data_keys).await { Ok(child_proof) => { - combined_proof = match combined_proof { - None => Some(child_proof), - Some(existing) => Some(StorageProof::merge([existing, child_proof])), - }; + all_proofs.push(child_proof); }, Err(e) => { tracing::error!( @@ -232,7 +230,12 @@ async fn collect_relay_storage_proofs( } } - combined_proof + // Merge all proofs + if all_proofs.is_empty() { + None + } else { + Some(StorageProof::merge(all_proofs)) + } } pub struct ParachainInherentDataProvider; @@ -279,7 +282,7 @@ impl ParachainInherentDataProvider { // Collect additional requested storage proofs (top-level and child tries) if let Some(additional_proofs) = - collect_relay_storage_proofs(relay_chain_interface, relay_parent, relay_proof_request) + collect_additional_storage_proofs(relay_chain_interface, relay_parent, relay_proof_request) .await { relay_chain_state = 
StorageProof::merge([relay_chain_state, additional_proofs]); From 0c01199e0030064c3f37e4bd03117e20658b6551 Mon Sep 17 00:00:00 2001 From: metricaez Date: Thu, 18 Dec 2025 08:19:48 -0300 Subject: [PATCH 62/69] choir: minor comment fix --- cumulus/pallets/subscriber/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cumulus/pallets/subscriber/src/lib.rs b/cumulus/pallets/subscriber/src/lib.rs index 728a72bc6d79c..d8ded05fdebcb 100644 --- a/cumulus/pallets/subscriber/src/lib.rs +++ b/cumulus/pallets/subscriber/src/lib.rs @@ -20,7 +20,7 @@ //! //! This pallet is heavily opinionated toward a parachain-to-parachain publish-subscribe model. //! It assumes ParaId as the identifier for each child trie and is designed specifically for -//! extracting published data from relay chain proofs in a pubsub mechanism. +//! extracting published data from relay chain proofs that were stored in child tries in a pubsub mechanism. extern crate alloc; @@ -283,7 +283,7 @@ pub mod pallet { impl ProcessRelayProofKeys for Pallet { /// Process child trie data from the relay proof. /// - /// Note: This implementation only processes child trie keys (pubsub data). + /// This implementation only processes child trie keys (pubsub data). /// Main trie keys in the proof are intentionally ignored. 
fn process_relay_proof_keys(verified_proof: &RelayChainStateProof) -> Weight { let (subscriptions, subscriptions_weight) = T::SubscriptionHandler::subscriptions(); From 6cffde6f245ca4698a20a22acbb814c9992198c6 Mon Sep 17 00:00:00 2001 From: metricaez Date: Thu, 18 Dec 2025 09:38:17 -0300 Subject: [PATCH 63/69] choir: more comments cleanup --- cumulus/pallets/subscriber/src/test_util.rs | 4 ++-- cumulus/pallets/subscriber/src/tests.rs | 4 ++-- polkadot/runtime/parachains/src/broadcaster/weights.rs | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/cumulus/pallets/subscriber/src/test_util.rs b/cumulus/pallets/subscriber/src/test_util.rs index e1b4a8efdd3a0..a52b9659a245f 100644 --- a/cumulus/pallets/subscriber/src/test_util.rs +++ b/cumulus/pallets/subscriber/src/test_util.rs @@ -94,7 +94,7 @@ pub mod bench_proof_builder { Ok(recorder.into_raw_storage_proof()) } - /// Build relay chain state proof w/ child trie data + /// Build relay chain state proof with child trie data pub fn build_sproof_with_child_data( publishers: &[(ParaId, Vec<(Vec, Vec)>)], ) -> RelayChainStateProof { @@ -132,7 +132,7 @@ pub mod bench_proof_builder { child_roots.push((prefixed_key.to_vec(), child_root.encode())); } - // Build main trie w/ child roots + // Build main trie with child roots use hash_db::{HashDB, EMPTY_PREFIX}; let empty_root = empty_trie_root::>(); diff --git a/cumulus/pallets/subscriber/src/tests.rs b/cumulus/pallets/subscriber/src/tests.rs index f4667ec55ca89..38ac475a585ea 100644 --- a/cumulus/pallets/subscriber/src/tests.rs +++ b/cumulus/pallets/subscriber/src/tests.rs @@ -69,7 +69,7 @@ fn root_change_triggers_processing() { Pallet::::process_relay_proof_keys(&proof1); assert_eq!(ReceivedData::get().len(), 1); - // Second block with different value (root changed) + // Second block with different value ReceivedData::set(vec![]); let proof2 = build_test_proof(publisher, vec![(key.clone(), value2.clone())]); Pallet::::process_relay_proof_keys(&proof2); @@ 
-94,7 +94,7 @@ fn unchanged_root_skips_processing() { Pallet::::process_relay_proof_keys(&proof); assert_eq!(ReceivedData::get().len(), 1); - // Second block with same data (unchanged root) + // Second block with same data ReceivedData::set(vec![]); let proof2 = build_test_proof(publisher, vec![(key.clone(), value)]); Pallet::::process_relay_proof_keys(&proof2); diff --git a/polkadot/runtime/parachains/src/broadcaster/weights.rs b/polkadot/runtime/parachains/src/broadcaster/weights.rs index 3586a929caa4b..3eac17816853c 100644 --- a/polkadot/runtime/parachains/src/broadcaster/weights.rs +++ b/polkadot/runtime/parachains/src/broadcaster/weights.rs @@ -28,7 +28,7 @@ pub trait WeightInfo { fn do_cleanup_publisher(k: u32) -> Weight; } -/// Placeholder weights (to be replaced with benchmarked values). +/// Placeholder weights to be replaced with benchmarked values. impl WeightInfo for () { fn register_publisher() -> Weight { Weight::zero() From de52b92b000a1ebe1838a93e0a2d95c3776aaec4 Mon Sep 17 00:00:00 2001 From: metricaez Date: Fri, 19 Dec 2025 13:41:21 -0300 Subject: [PATCH 64/69] feat: naming and comment suggestions --- cumulus/primitives/core/src/lib.rs | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/cumulus/primitives/core/src/lib.rs b/cumulus/primitives/core/src/lib.rs index d4dc8ccb8dc4e..fddf4ffa8e730 100644 --- a/cumulus/primitives/core/src/lib.rs +++ b/cumulus/primitives/core/src/lib.rs @@ -545,16 +545,11 @@ sp_api::decl_runtime_apis! { /// /// This API allows parachains to request both top-level relay chain storage keys /// and child trie storage keys to be included in the relay chain state proof. - pub trait KeyToIncludeInRelayProofApi { + pub trait KeyToIncludeInRelayProof { /// Returns relay chain storage proof requests. 
/// - /// The returned `RelayProofRequest` contains a list of storage keys where each key - /// can be either: - /// - `RelayStorageKey::Top`: Top-level relay chain storage key - /// - `RelayStorageKey::Child`: Child trie storage, containing the child trie identifier - /// and the key to prove from that child trie - /// - - /// The collator generates proofs for these and includes them in the relay chain state proof. + + /// The collator will include them in the relay chain proof that is passed alongside the parachain inherent into the runtime. fn keys_to_prove() -> RelayProofRequest; } } From 2dd623d061e8315e7dfdbdff5018b540814edfa Mon Sep 17 00:00:00 2001 From: metricaez Date: Fri, 19 Dec 2025 14:31:57 -0300 Subject: [PATCH 65/69] fix: name change occurrences fix --- cumulus/client/consensus/aura/src/collators/lookahead.rs | 6 +++--- cumulus/client/consensus/aura/src/collators/mod.rs | 4 ++-- cumulus/pallets/parachain-system/src/lib.rs | 2 +- cumulus/polkadot-omni-node/lib/src/common/aura.rs | 6 +++--- .../polkadot-omni-node/lib/src/fake_runtime_api/utils.rs | 2 +- cumulus/test/runtime/src/lib.rs | 2 +- 6 files changed, 11 insertions(+), 11 deletions(-) diff --git a/cumulus/client/consensus/aura/src/collators/lookahead.rs b/cumulus/client/consensus/aura/src/collators/lookahead.rs index c33ce1c41d6c9..f20a64abdfc7b 100644 --- a/cumulus/client/consensus/aura/src/collators/lookahead.rs +++ b/cumulus/client/consensus/aura/src/collators/lookahead.rs @@ -36,7 +36,7 @@ use codec::{Codec, Encode}; use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; use cumulus_client_consensus_common::{self as consensus_common, ParachainBlockImportMarker}; use cumulus_primitives_aura::AuraUnincludedSegmentApi; -use cumulus_primitives_core::{CollectCollationInfo, KeyToIncludeInRelayProofApi, PersistedValidationData}; +use cumulus_primitives_core::{CollectCollationInfo, KeyToIncludeInRelayProof, PersistedValidationData}; use 
cumulus_relay_chain_interface::RelayChainInterface; use sp_consensus::Environment; @@ -167,7 +167,7 @@ where Client::Api: AuraApi + CollectCollationInfo + AuraUnincludedSegmentApi - + KeyToIncludeInRelayProofApi, + + KeyToIncludeInRelayProof, Backend: sc_client_api::Backend + 'static, RClient: RelayChainInterface + Clone + 'static, CIDP: CreateInherentDataProviders + 'static, @@ -221,7 +221,7 @@ where Client::Api: AuraApi + CollectCollationInfo + AuraUnincludedSegmentApi - + KeyToIncludeInRelayProofApi, + + KeyToIncludeInRelayProof, Backend: sc_client_api::Backend + 'static, RClient: RelayChainInterface + Clone + 'static, CIDP: CreateInherentDataProviders + 'static, diff --git a/cumulus/client/consensus/aura/src/collators/mod.rs b/cumulus/client/consensus/aura/src/collators/mod.rs index 5fcb0bee7bf63..e22d77c3d1b0f 100644 --- a/cumulus/client/consensus/aura/src/collators/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/mod.rs @@ -26,7 +26,7 @@ use codec::Codec; use cumulus_client_consensus_common::{self as consensus_common, ParentSearchParams}; use cumulus_primitives_aura::{AuraUnincludedSegmentApi, Slot}; use cumulus_primitives_core::{ - relay_chain::Header as RelayHeader, BlockT, KeyToIncludeInRelayProofApi, RelayProofRequest, + relay_chain::Header as RelayHeader, BlockT, KeyToIncludeInRelayProof, RelayProofRequest, }; use cumulus_relay_chain_interface::{OverseerHandle, RelayChainInterface}; use polkadot_node_subsystem::messages::{CollatorProtocolMessage, RuntimeApiRequest}; @@ -677,7 +677,7 @@ fn get_relay_proof_request( where Block: BlockT, Client: ProvideRuntimeApi, - Client::Api: KeyToIncludeInRelayProofApi, + Client::Api: KeyToIncludeInRelayProof, { client .runtime_api() diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index ed55293a88db0..1b26dead9bc02 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -269,7 +269,7 @@ pub mod pallet { 
/// /// This allows parachains to process data from the relay chain state proof, /// including both child trie keys and main trie keys that were requested - /// via `KeyToIncludeInRelayProofApi`. + /// via `KeyToIncludeInRelayProof`. type RelayProofKeysProcessor: relay_state_snapshot::ProcessRelayProofKeys; } diff --git a/cumulus/polkadot-omni-node/lib/src/common/aura.rs b/cumulus/polkadot-omni-node/lib/src/common/aura.rs index 10a631306b33a..b6f156f96dfdb 100644 --- a/cumulus/polkadot-omni-node/lib/src/common/aura.rs +++ b/cumulus/polkadot-omni-node/lib/src/common/aura.rs @@ -18,7 +18,7 @@ use codec::Codec; use cumulus_primitives_aura::AuraUnincludedSegmentApi; -use cumulus_primitives_core::KeyToIncludeInRelayProofApi; +use cumulus_primitives_core::KeyToIncludeInRelayProof; use sp_consensus_aura::AuraApi; use sp_runtime::{ app_crypto::{AppCrypto, AppPair, AppSignature, Pair}, @@ -54,7 +54,7 @@ pub trait AuraRuntimeApi: sp_api::ApiExt + AuraApi::Public> + AuraUnincludedSegmentApi - + KeyToIncludeInRelayProofApi + + KeyToIncludeInRelayProof + Sized { /// Check if the runtime has the Aura API. @@ -68,6 +68,6 @@ impl AuraRuntimeApi for T wher T: sp_api::ApiExt + AuraApi::Public> + AuraUnincludedSegmentApi - + KeyToIncludeInRelayProofApi + + KeyToIncludeInRelayProof { } diff --git a/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs b/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs index ab0a9e1826739..1f6dfa177e96d 100644 --- a/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs +++ b/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs @@ -176,7 +176,7 @@ macro_rules! 
impl_node_runtime_apis { } } - impl cumulus_primitives_core::KeyToIncludeInRelayProofApi<$block> for $runtime { + impl cumulus_primitives_core::KeyToIncludeInRelayProof<$block> for $runtime { fn keys_to_prove() -> RelayProofRequest { unimplemented!() } diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index 1492ef02b8370..2b110e9cb8b90 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -643,7 +643,7 @@ impl_runtime_apis! { } } - impl cumulus_primitives_core::KeyToIncludeInRelayProofApi for Runtime { + impl cumulus_primitives_core::KeyToIncludeInRelayProof for Runtime { fn keys_to_prove() -> RelayProofRequest { Default::default() } From bcba0f9ebfd9660d5d6ee1f04809a981abf636f7 Mon Sep 17 00:00:00 2001 From: metricaez Date: Fri, 19 Dec 2025 16:05:41 -0300 Subject: [PATCH 66/69] feat: collect static keys and unify prove_read --- cumulus/client/parachain-inherent/src/lib.rs | 119 ++++++++----------- 1 file changed, 51 insertions(+), 68 deletions(-) diff --git a/cumulus/client/parachain-inherent/src/lib.rs b/cumulus/client/parachain-inherent/src/lib.rs index 093bc6990245e..6c72e1774b1ca 100644 --- a/cumulus/client/parachain-inherent/src/lib.rs +++ b/cumulus/client/parachain-inherent/src/lib.rs @@ -35,16 +35,15 @@ use sp_storage::ChildInfo; const LOG_TARGET: &str = "parachain-inherent"; -/// Collect the relevant relay chain state in form of a proof for putting it into the validation -/// data inherent. -async fn collect_relay_storage_proof( +/// Builds the list of static relay chain storage keys that are always needed for parachain +/// validation. 
+async fn get_static_relay_storage_keys( relay_chain_interface: &impl RelayChainInterface, para_id: ParaId, relay_parent: PHash, include_authorities: bool, include_next_authorities: bool, - additional_relay_state_keys: Vec>, -) -> Option { +) -> Option>> { use relay_chain::well_known_keys as relay_well_known_keys; let ingress_channels = relay_chain_interface @@ -138,53 +137,49 @@ async fn collect_relay_storage_proof( relevant_keys.push(relay_well_known_keys::NEXT_AUTHORITIES.to_vec()); } - // Add additional relay state keys - let unique_keys: Vec> = additional_relay_state_keys - .into_iter() - .filter(|key| !relevant_keys.contains(key)) - .collect(); - relevant_keys.extend(unique_keys); - - relay_chain_interface - .prove_read(relay_parent, &relevant_keys) - .await - .map_err(|e| { - tracing::error!( - target: LOG_TARGET, - relay_parent = ?relay_parent, - error = ?e, - "Cannot obtain read proof from relay chain.", - ); - }) - .ok() + Some(relevant_keys) } -/// Collect additional storage proofs requested by the runtime. -/// -/// Generates proofs for both top-level relay chain storage and child trie data. -/// Top-level keys are proven directly. Child trie roots are automatically included -/// from their standard storage locations (`:child_storage:default:` + identifier). -/// -/// Returns a merged proof combining all requested data, or `None` if there are no requests. -async fn collect_additional_storage_proofs( +/// Collect the relevant relay chain state in form of a proof for putting it into the validation +/// data inherent. 
+async fn collect_relay_storage_proof( relay_chain_interface: &impl RelayChainInterface, + para_id: ParaId, relay_parent: PHash, + include_authorities: bool, + include_next_authorities: bool, + additional_relay_state_keys: Vec>, relay_proof_request: RelayProofRequest, ) -> Option { - let RelayProofRequest { keys } = relay_proof_request; + // Get static keys that are always needed + let mut all_top_keys = get_static_relay_storage_keys( + relay_chain_interface, + para_id, + relay_parent, + include_authorities, + include_next_authorities, + ) + .await?; - if keys.is_empty() { - return None; - } + // Add additional_relay_state_keys + let unique_keys: Vec> = additional_relay_state_keys + .into_iter() + .filter(|key| !all_top_keys.contains(key)) + .collect(); + all_top_keys.extend(unique_keys); - // Group keys by storage type - let mut top_keys = Vec::new(); + // Group requested keys by storage type + let RelayProofRequest { keys } = relay_proof_request; let mut child_keys: std::collections::BTreeMap, Vec>> = std::collections::BTreeMap::new(); for key in keys { match key { - RelayStorageKey::Top(k) => top_keys.push(k), + RelayStorageKey::Top(k) => { + if !all_top_keys.contains(&k) { + all_top_keys.push(k); + } + }, RelayStorageKey::Child { storage_key, key } => { child_keys.entry(storage_key).or_default().push(key); }, @@ -194,21 +189,20 @@ async fn collect_additional_storage_proofs( // Collect all storage proofs let mut all_proofs = Vec::new(); - // Collect top-level storage proofs - if !top_keys.is_empty() { - match relay_chain_interface.prove_read(relay_parent, &top_keys).await { - Ok(top_proof) => { - all_proofs.push(top_proof); - }, - Err(e) => { - tracing::error!( - target: LOG_TARGET, - relay_parent = ?relay_parent, - error = ?e, - "Cannot obtain top-level storage proof from relay chain.", - ); - }, - } + // Collect top-level storage proof. 
+ match relay_chain_interface.prove_read(relay_parent, &all_top_keys).await { + Ok(top_proof) => { + all_proofs.push(top_proof); + }, + Err(e) => { + tracing::error!( + target: LOG_TARGET, + relay_parent = ?relay_parent, + error = ?e, + "Cannot obtain relay chain storage proof.", + ); + return None; + }, } // Collect child trie proofs @@ -231,11 +225,7 @@ async fn collect_additional_storage_proofs( } // Merge all proofs - if all_proofs.is_empty() { - None - } else { - Some(StorageProof::merge(all_proofs)) - } + Some(StorageProof::merge(all_proofs)) } pub struct ParachainInherentDataProvider; @@ -270,24 +260,17 @@ impl ParachainInherentDataProvider { .iter() .skip(1) .any(sc_consensus_babe::contains_epoch_change::); - let mut relay_chain_state = collect_relay_storage_proof( + let relay_chain_state = collect_relay_storage_proof( relay_chain_interface, para_id, relay_parent, !relay_parent_descendants.is_empty(), include_next_authorities, additional_relay_state_keys, + relay_proof_request, ) .await?; - // Collect additional requested storage proofs (top-level and child tries) - if let Some(additional_proofs) = - collect_additional_storage_proofs(relay_chain_interface, relay_parent, relay_proof_request) - .await - { - relay_chain_state = StorageProof::merge([relay_chain_state, additional_proofs]); - } - let downward_messages = relay_chain_interface .retrieve_dmq_contents(para_id, relay_parent) .await From be0b30592f11857a9269aed1377b0aa2045049ff Mon Sep 17 00:00:00 2001 From: metricaez Date: Fri, 19 Dec 2025 23:06:33 -0300 Subject: [PATCH 67/69] feat: move relay proof handler to OnSystemEvent --- cumulus/pallets/aura-ext/src/test.rs | 1 - cumulus/pallets/parachain-system/src/lib.rs | 32 +++++++++++++------ cumulus/pallets/parachain-system/src/mock.rs | 1 - .../src/relay_state_snapshot.rs | 15 --------- cumulus/pallets/solo-to-para/src/lib.rs | 5 +++ cumulus/pallets/xcmp-queue/src/mock.rs | 1 - .../assets/asset-hub-rococo/src/lib.rs | 1 - 
.../assets/asset-hub-westend/src/lib.rs | 1 - .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 1 - .../bridge-hubs/bridge-hub-westend/src/lib.rs | 1 - .../collectives-westend/src/lib.rs | 1 - .../coretime/coretime-westend/src/lib.rs | 1 - .../glutton/glutton-westend/src/lib.rs | 1 - .../runtimes/people/people-westend/src/lib.rs | 1 - .../runtimes/testing/penpal/src/lib.rs | 1 - .../testing/yet-another-parachain/src/lib.rs | 1 - cumulus/test/runtime/src/lib.rs | 1 - 17 files changed, 27 insertions(+), 39 deletions(-) diff --git a/cumulus/pallets/aura-ext/src/test.rs b/cumulus/pallets/aura-ext/src/test.rs index 3486e56a5c2e4..7c4c78ab2a5b0 100644 --- a/cumulus/pallets/aura-ext/src/test.rs +++ b/cumulus/pallets/aura-ext/src/test.rs @@ -151,7 +151,6 @@ impl cumulus_pallet_parachain_system::Config for Test { type CheckAssociatedRelayNumber = AnyRelayNumber; type ConsensusHook = ExpectParentIncluded; type RelayParentOffset = ConstU32<0>; - type RelayProofKeysProcessor = (); } fn set_ancestors() { diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index 1b26dead9bc02..5f12e471e8e98 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -85,7 +85,6 @@ use unincluded_segment::{ }; pub use consensus_hook::{ConsensusHook, ExpectParentIncluded}; -pub use relay_state_snapshot::ProcessRelayProofKeys; /// Register the `validate_block` function that is used by parachains to validate blocks on a /// validator. /// @@ -264,13 +263,6 @@ pub mod pallet { /// /// If set to 0, this config has no impact. type RelayParentOffset: Get; - - /// Processor for relay chain proof keys. - /// - /// This allows parachains to process data from the relay chain state proof, - /// including both child trie keys and main trie keys that were requested - /// via `KeyToIncludeInRelayProof`. 
- type RelayProofKeysProcessor: relay_state_snapshot::ProcessRelayProofKeys; } #[pallet::hooks] @@ -709,7 +701,9 @@ pub mod pallet { >::put(relevant_messaging_state.clone()); >::put(host_config); - total_weight.saturating_accrue(T::RelayProofKeysProcessor::process_relay_proof_keys(&relay_state_proof)); + total_weight.saturating_accrue( + ::on_relay_state_proof(&relay_state_proof), + ); ::on_validation_data(&vfp); @@ -1776,13 +1770,31 @@ impl polkadot_runtime_parachains::EnsureForParachain for Pallet { /// Or like [`on_validation_code_applied`](Self::on_validation_code_applied) that is called /// when the new validation is written to the state. This means that /// from the next block the runtime is being using this new code. -#[impl_trait_for_tuples::impl_for_tuples(30)] pub trait OnSystemEvent { /// Called in each blocks once when the validation data is set by the inherent. fn on_validation_data(data: &PersistedValidationData); /// Called when the validation code is being applied, aka from the next block on this is the new /// runtime. fn on_validation_code_applied(); + /// Called to process keys from the verified relay chain state proof. + fn on_relay_state_proof(relay_state_proof: &relay_state_snapshot::RelayChainStateProof) -> Weight; +} + +#[impl_trait_for_tuples::impl_for_tuples(30)] +impl OnSystemEvent for Tuple { + fn on_validation_data(data: &PersistedValidationData) { + for_tuples!( #( Tuple::on_validation_data(data); )* ); + } + + fn on_validation_code_applied() { + for_tuples!( #( Tuple::on_validation_code_applied(); )* ); + } + + fn on_relay_state_proof(relay_state_proof: &relay_state_snapshot::RelayChainStateProof) -> Weight { + let mut weight = Weight::zero(); + for_tuples!( #( weight = weight.saturating_add(Tuple::on_relay_state_proof(relay_state_proof)); )* ); + weight + } } /// Holds the most recent relay-parent state root and block number of the current parachain block. 
diff --git a/cumulus/pallets/parachain-system/src/mock.rs b/cumulus/pallets/parachain-system/src/mock.rs index b361031be2c37..d3c7cef52b637 100644 --- a/cumulus/pallets/parachain-system/src/mock.rs +++ b/cumulus/pallets/parachain-system/src/mock.rs @@ -99,7 +99,6 @@ impl Config for Test { type ConsensusHook = TestConsensusHook; type WeightInfo = (); type RelayParentOffset = ConstU32<0>; - type RelayProofKeysProcessor = (); } std::thread_local! { diff --git a/cumulus/pallets/parachain-system/src/relay_state_snapshot.rs b/cumulus/pallets/parachain-system/src/relay_state_snapshot.rs index 7c6efb5ddf73e..e9c32997f9760 100644 --- a/cumulus/pallets/parachain-system/src/relay_state_snapshot.rs +++ b/cumulus/pallets/parachain-system/src/relay_state_snapshot.rs @@ -21,26 +21,11 @@ use codec::{Decode, Encode}; use cumulus_primitives_core::{ relay_chain, AbridgedHostConfiguration, AbridgedHrmpChannel, ParaId, }; -use frame_support::weights::Weight; use scale_info::TypeInfo; use sp_runtime::traits::HashingFor; use sp_state_machine::{Backend, TrieBackend, TrieBackendBuilder}; use sp_trie::{HashDBT, MemoryDB, StorageProof, EMPTY_PREFIX}; -/// Process keys from verified relay chain state proofs. -/// -/// This trait allows processing of relay chain storage data from the verified proof. -pub trait ProcessRelayProofKeys { - /// Process keys from a verified relay state proof. - fn process_relay_proof_keys(verified_proof: &RelayChainStateProof) -> Weight; -} - -impl ProcessRelayProofKeys for () { - fn process_relay_proof_keys(_verified_proof: &RelayChainStateProof) -> Weight { - Weight::zero() - } -} - /// The capacity of the upward message queue of a parachain on the relay chain. // The field order should stay the same as the data can be found in the proof to ensure both are // have the same encoded representation. 
diff --git a/cumulus/pallets/solo-to-para/src/lib.rs b/cumulus/pallets/solo-to-para/src/lib.rs index ff68d1b63fe7f..0d6d82cf4590b 100644 --- a/cumulus/pallets/solo-to-para/src/lib.rs +++ b/cumulus/pallets/solo-to-para/src/lib.rs @@ -103,5 +103,10 @@ pub mod pallet { fn on_validation_code_applied() { crate::Pallet::::set_pending_custom_validation_head_data(); } + fn on_relay_state_proof( + _relay_state_proof: ¶chain_system::relay_state_snapshot::RelayChainStateProof, + ) -> frame_support::weights::Weight { + frame_support::weights::Weight::zero() + } } } diff --git a/cumulus/pallets/xcmp-queue/src/mock.rs b/cumulus/pallets/xcmp-queue/src/mock.rs index 1e32c9003a948..3be87221c052e 100644 --- a/cumulus/pallets/xcmp-queue/src/mock.rs +++ b/cumulus/pallets/xcmp-queue/src/mock.rs @@ -106,7 +106,6 @@ impl cumulus_pallet_parachain_system::Config for Test { type CheckAssociatedRelayNumber = AnyRelayNumber; type ConsensusHook = cumulus_pallet_parachain_system::consensus_hook::ExpectParentIncluded; type RelayParentOffset = ConstU32<0>; - type RelayProofKeysProcessor = (); } parameter_types! 
{ diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 720ebe8ee2b4b..69c7a9e544326 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -745,7 +745,6 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32<0>; - type RelayProofKeysProcessor = (); } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index f0cb527c42f09..8b9cbf66015c7 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -903,7 +903,6 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32<0>; - type RelayProofKeysProcessor = (); } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index dac8fc398127b..cfc58a2a4f6eb 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -400,7 +400,6 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32<0>; - type RelayProofKeysProcessor = (); } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< 
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 625c2ebe24507..62532fac5fec3 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -392,7 +392,6 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32<0>; - type RelayProofKeysProcessor = (); } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index 37edc0f02329c..ba70aabd9d0cf 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -423,7 +423,6 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32<0>; - type RelayProofKeysProcessor = (); } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index fb72ae7a59586..ed8748a34933a 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -305,7 +305,6 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32<0>; - type RelayProofKeysProcessor = (); } type ConsensusHook = 
cumulus_pallet_aura_ext::FixedVelocityConsensusHook< diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs index 98131ac4ef58a..eadc4d289fe9d 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs @@ -191,7 +191,6 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type ConsensusHook = ConsensusHook; type WeightInfo = weights::cumulus_pallet_parachain_system::WeightInfo; type RelayParentOffset = ConstU32<0>; - type RelayProofKeysProcessor = (); } parameter_types! { diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index 427ff1454d8fd..dc4a616f02d1c 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -281,7 +281,6 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type ConsensusHook = ConsensusHook; type WeightInfo = weights::cumulus_pallet_parachain_system::WeightInfo; type RelayParentOffset = ConstU32<0>; - type RelayProofKeysProcessor = (); } type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs index 4d1ee6a0d2254..bc018b160f778 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs @@ -664,7 +664,6 @@ impl cumulus_pallet_parachain_system::Config for Runtime { >; type RelayParentOffset = ConstU32<0>; - type RelayProofKeysProcessor = (); } impl parachain_info::Config for Runtime {} diff --git a/cumulus/parachains/runtimes/testing/yet-another-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/yet-another-parachain/src/lib.rs index 1c5ead9e7b90b..0f5d2ecd99494 100644 --- 
a/cumulus/parachains/runtimes/testing/yet-another-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/yet-another-parachain/src/lib.rs @@ -368,7 +368,6 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32; - type RelayProofKeysProcessor = (); } impl pallet_message_queue::Config for Runtime { diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index 2b110e9cb8b90..57ea6cab00477 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -385,7 +385,6 @@ impl cumulus_pallet_parachain_system::Config for Runtime { cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; type ConsensusHook = ConsensusHook; type RelayParentOffset = ConstU32; - type RelayProofKeysProcessor = (); } impl parachain_info::Config for Runtime {} From 228f95fb4e42b3a0d3524566c73a41a850a6fb03 Mon Sep 17 00:00:00 2001 From: metricaez Date: Sat, 20 Dec 2025 12:55:01 -0300 Subject: [PATCH 68/69] feat: prove_child_read for relay rpc interface --- .../relay-chain-rpc-interface/src/lib.rs | 22 +++++++++++-------- .../src/rpc_client.rs | 11 ++++++++++ 2 files changed, 24 insertions(+), 9 deletions(-) diff --git a/cumulus/client/relay-chain-rpc-interface/src/lib.rs b/cumulus/client/relay-chain-rpc-interface/src/lib.rs index 7960444060e38..b88d95e9c1778 100644 --- a/cumulus/client/relay-chain-rpc-interface/src/lib.rs +++ b/cumulus/client/relay-chain-rpc-interface/src/lib.rs @@ -213,16 +213,20 @@ impl RelayChainInterface for RelayChainRpcInterface { async fn prove_child_read( &self, - _relay_parent: RelayHash, - _child_info: &ChildInfo, - _child_keys: &[Vec], + relay_parent: RelayHash, + child_info: &ChildInfo, + child_keys: &[Vec], ) -> RelayChainResult { - // Not implemented: requires relay chain RPC to expose child trie proof method. 
- tracing::warn!( - target: "relay-chain-rpc-interface", - "prove_child_read not implemented for RPC interface, returning empty proof" - ); - Ok(StorageProof::empty()) + let child_storage_key = child_info.prefixed_storage_key(); + let storage_keys: Vec = + child_keys.iter().map(|key| StorageKey(key.clone())).collect(); + + self.rpc_client + .state_get_child_read_proof(child_storage_key, storage_keys, Some(relay_parent)) + .await + .map(|read_proof| { + StorageProof::new(read_proof.proof.into_iter().map(|bytes| bytes.to_vec())) + }) } /// Wait for a given relay chain block diff --git a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs index 80858a665cfaf..52039a4236a58 100644 --- a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs +++ b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs @@ -276,6 +276,17 @@ impl RelayChainRpcClient { self.request("state_getReadProof", params).await } + /// Get child trie read proof for `child_keys` + pub async fn state_get_child_read_proof( + &self, + child_storage_key: sp_core::storage::PrefixedStorageKey, + child_keys: Vec, + at: Option, + ) -> Result, RelayChainError> { + let params = rpc_params![child_storage_key, child_keys, at]; + self.request("state_getChildReadProof", params).await + } + /// Retrieve storage item at `storage_key` pub async fn state_get_storage( &self, From 5bd4f19dd323ce2026a0b34ad6cd97f510e1d038 Mon Sep 17 00:00:00 2001 From: metricaez Date: Mon, 22 Dec 2025 00:08:37 -0300 Subject: [PATCH 69/69] feat: imp KeyToIncludeInRelayProof for test pallet --- Cargo.lock | 1 + cumulus/test/runtime/Cargo.toml | 4 ++++ cumulus/test/runtime/src/lib.rs | 13 ++++++++--- cumulus/test/runtime/src/test_pallet.rs | 29 +++++++++++++++++++++++++ 4 files changed, 44 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7e8969262b741..1e5d6425cd5c6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5054,6 +5054,7 @@ dependencies = [ 
"pallet-timestamp", "pallet-transaction-payment", "parity-scale-codec", + "polkadot-primitives", "scale-info", "serde_json", "sp-api", diff --git a/cumulus/test/runtime/Cargo.toml b/cumulus/test/runtime/Cargo.toml index cc8142ff2dedb..703e346582891 100644 --- a/cumulus/test/runtime/Cargo.toml +++ b/cumulus/test/runtime/Cargo.toml @@ -41,6 +41,9 @@ sp-session = { workspace = true } sp-transaction-pool = { workspace = true } sp-version = { workspace = true } +# Polkadot +polkadot-primitives = { workspace = true } + # Cumulus cumulus-pallet-aura-ext = { workspace = true } cumulus-pallet-parachain-system = { workspace = true } @@ -75,6 +78,7 @@ std = [ "pallet-timestamp/std", "pallet-transaction-payment/std", "parachain-info/std", + "polkadot-primitives/std", "scale-info/std", "serde_json/std", "sp-api/std", diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index 57ea6cab00477..e668fc076df84 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -374,7 +374,7 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type WeightInfo = (); type SelfParaId = parachain_info::Pallet; type RuntimeEvent = RuntimeEvent; - type OnSystemEvent = (); + type OnSystemEvent = TestPallet; type OutboundXcmpMessageSource = (); // Ignore all DMP messages by enqueueing them into `()`: type DmpQueue = frame_support::traits::EnqueueWithOrigin<(), sp_core::ConstU8<0>>; @@ -643,8 +643,15 @@ impl_runtime_apis! { } impl cumulus_primitives_core::KeyToIncludeInRelayProof for Runtime { - fn keys_to_prove() -> RelayProofRequest { - Default::default() + fn keys_to_prove() -> cumulus_primitives_core::RelayProofRequest { + use cumulus_primitives_core::RelayStorageKey; + + RelayProofRequest { + keys: vec![ + // Request a well-known key to verify its inclusion in the relay proof. 
+ RelayStorageKey::Top(test_pallet::RELAY_EPOCH_INDEX_KEY.to_vec()), + ], + } } } } diff --git a/cumulus/test/runtime/src/test_pallet.rs b/cumulus/test/runtime/src/test_pallet.rs index a972198c300d9..25d1d03546b9d 100644 --- a/cumulus/test/runtime/src/test_pallet.rs +++ b/cumulus/test/runtime/src/test_pallet.rs @@ -17,10 +17,15 @@ /// A special pallet that exposes dispatchables that are only useful for testing. pub use pallet::*; +use polkadot_primitives::well_known_keys; + /// Some key that we set in genesis and only read in [`TestOnRuntimeUpgrade`] to ensure that /// [`OnRuntimeUpgrade`] works as expected. pub const TEST_RUNTIME_UPGRADE_KEY: &[u8] = b"+test_runtime_upgrade_key+"; +/// A well-known key to request for inclusion in the proof. +pub use well_known_keys::EPOCH_INDEX as RELAY_EPOCH_INDEX_KEY; + #[frame_support::pallet(dev_mode)] pub mod pallet { use crate::test_pallet::TEST_RUNTIME_UPGRADE_KEY; @@ -121,3 +126,27 @@ pub mod pallet { } } } + +impl cumulus_pallet_parachain_system::OnSystemEvent for Pallet { + fn on_validation_data(_data: &cumulus_primitives_core::PersistedValidationData) { + // Nothing to do here for tests + } + + fn on_validation_code_applied() { + // Nothing to do here for tests + } + + fn on_relay_state_proof( + relay_state_proof: &cumulus_pallet_parachain_system::relay_state_snapshot::RelayChainStateProof, + ) -> frame_support::weights::Weight { + use crate::test_pallet::RELAY_EPOCH_INDEX_KEY; + + // Expect the requested key to be part of the proof. + relay_state_proof + .read_optional_entry::(RELAY_EPOCH_INDEX_KEY) + .expect("Invalid relay chain state proof") + .expect("EPOCH_INDEX must be present"); + + frame_support::weights::Weight::zero() + } +}