diff --git a/Cargo.lock b/Cargo.lock index 1410f267b54a0..9e48f082c0f09 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6540,6 +6540,7 @@ dependencies = [ "sp-io 30.0.0", "sp-npos-elections", "sp-runtime 31.0.1", + "sp-std 14.0.0", ] [[package]] @@ -10926,6 +10927,32 @@ version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c1b04fb49957986fdce4d6ee7a65027d55d4b6d2265e5848bbb507b58ccfdb6f" +[[package]] +name = "pallet-ahm-test" +version = "1.0.0" +dependencies = [ + "frame-election-provider-support", + "frame-support", + "log", + "pallet-authorship", + "pallet-balances", + "pallet-election-provider-multi-block", + "pallet-root-offences", + "pallet-session", + "pallet-staking", + "pallet-staking-async", + "pallet-staking-async-ah-client", + "pallet-staking-async-rc-client", + "pallet-timestamp", + "parity-scale-codec", + "polkadot-sdk-frame", + "scale-info", + "sp-core 28.0.0", + "sp-session", + "sp-staking", + "sp-tracing 16.0.0", +] + [[package]] name = "pallet-alliance" version = "27.0.0" @@ -11185,6 +11212,7 @@ dependencies = [ "sp-runtime 31.0.1", "sp-session", "sp-staking", + "sp-tracing 16.0.0", ] [[package]] @@ -11274,6 +11302,7 @@ dependencies = [ "sp-session", "sp-staking", "sp-state-machine 0.35.0", + "sp-tracing 16.0.0", ] [[package]] @@ -11762,6 +11791,29 @@ dependencies = [ "sp-tracing 16.0.0", ] +[[package]] +name = "pallet-election-provider-multi-block" +version = "0.9.0" +dependencies = [ + "frame-benchmarking", + "frame-election-provider-support", + "frame-support", + "frame-system", + "log", + "pallet-balances", + "parity-scale-codec", + "parking_lot 0.12.3", + "rand 0.8.5", + "scale-info", + "sp-arithmetic 23.0.0", + "sp-core 28.0.0", + "sp-io 30.0.0", + "sp-npos-elections", + "sp-runtime 31.0.1", + "sp-std 14.0.0", + "sp-tracing 16.0.0", +] + [[package]] name = "pallet-election-provider-multi-phase" version = "27.0.0" @@ -12053,6 +12105,7 @@ dependencies = [ "sp-runtime 31.0.1", "sp-session", 
"sp-staking", + "sp-tracing 16.0.0", ] [[package]] @@ -12449,6 +12502,7 @@ dependencies = [ "sp-io 30.0.0", "sp-runtime 31.0.1", "sp-staking", + "sp-tracing 16.0.0", ] [[package]] @@ -12767,6 +12821,7 @@ dependencies = [ "pallet-timestamp", "parity-scale-codec", "scale-info", + "sp-core 28.0.0", "sp-io 30.0.0", "sp-runtime 31.0.1", "sp-staking", @@ -12903,6 +12958,7 @@ dependencies = [ "sp-io 30.0.0", "sp-runtime 31.0.1", "sp-session", + "sp-staking", ] [[package]] @@ -12964,6 +13020,321 @@ dependencies = [ "substrate-test-utils", ] +[[package]] +name = "pallet-staking-async" +version = "0.1.0" +dependencies = [ + "frame-benchmarking", + "frame-election-provider-support", + "frame-support", + "frame-system", + "log", + "pallet-bags-list", + "pallet-balances", + "pallet-staking-async-rc-client", + "parity-scale-codec", + "rand 0.8.5", + "rand_chacha 0.3.1", + "scale-info", + "serde", + "sp-application-crypto 30.0.0", + "sp-core 28.0.0", + "sp-io 30.0.0", + "sp-npos-elections", + "sp-runtime 31.0.1", + "sp-staking", + "sp-tracing 16.0.0", + "substrate-test-utils", +] + +[[package]] +name = "pallet-staking-async-ah-client" +version = "0.1.0" +dependencies = [ + "frame-support", + "frame-system", + "log", + "pallet-authorship", + "pallet-session", + "pallet-staking-async-rc-client", + "parity-scale-codec", + "scale-info", + "serde", + "sp-core 28.0.0", + "sp-runtime 31.0.1", + "sp-staking", +] + +[[package]] +name = "pallet-staking-async-parachain-runtime" +version = "0.15.0" +dependencies = [ + "asset-test-utils", + "assets-common", + "bp-asset-hub-rococo", + "bp-bridge-hub-rococo", + "bp-bridge-hub-westend", + "cumulus-pallet-aura-ext", + "cumulus-pallet-parachain-system", + "cumulus-pallet-session-benchmarking", + "cumulus-pallet-weight-reclaim", + "cumulus-pallet-xcm", + "cumulus-pallet-xcmp-queue", + "cumulus-primitives-aura", + "cumulus-primitives-core", + "cumulus-primitives-utility", + "frame-benchmarking", + "frame-election-provider-support", + 
"frame-executive", + "frame-metadata-hash-extension", + "frame-support", + "frame-system", + "frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "frame-try-runtime", + "hex-literal", + "log", + "pallet-asset-conversion", + "pallet-asset-conversion-ops", + "pallet-asset-conversion-tx-payment", + "pallet-asset-rate", + "pallet-asset-rewards", + "pallet-assets", + "pallet-assets-freezer", + "pallet-aura", + "pallet-authorship", + "pallet-bags-list", + "pallet-balances", + "pallet-collator-selection", + "pallet-conviction-voting", + "pallet-delegated-staking", + "pallet-election-provider-multi-block", + "pallet-fast-unstake", + "pallet-message-queue", + "pallet-migrations", + "pallet-multisig", + "pallet-nft-fractionalization", + "pallet-nfts", + "pallet-nfts-runtime-api", + "pallet-nomination-pools", + "pallet-nomination-pools-runtime-api", + "pallet-parameters", + "pallet-preimage", + "pallet-proxy", + "pallet-referenda", + "pallet-scheduler", + "pallet-session", + "pallet-staking-async", + "pallet-staking-async-rc-client", + "pallet-staking-async-runtime-api", + "pallet-state-trie-migration", + "pallet-sudo", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "pallet-treasury", + "pallet-uniques", + "pallet-utility", + "pallet-vesting", + "pallet-whitelist", + "pallet-xcm", + "pallet-xcm-benchmarks", + "pallet-xcm-bridge-hub-router", + "parachains-common", + "parachains-runtimes-test-utils", + "parity-scale-codec", + "polkadot-parachain-primitives", + "polkadot-runtime-common", + "primitive-types 0.13.1", + "scale-info", + "serde_json", + "sp-api 26.0.0", + "sp-arithmetic 23.0.0", + "sp-block-builder", + "sp-consensus-aura", + "sp-core 28.0.0", + "sp-genesis-builder", + "sp-inherents", + "sp-keyring", + "sp-npos-elections", + "sp-offchain", + "sp-runtime 31.0.1", + "sp-session", + "sp-staking", + "sp-std 14.0.0", + "sp-storage 19.0.0", + "sp-transaction-pool", + "sp-version 29.0.0", + 
"staging-parachain-info", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", + "substrate-wasm-builder", + "testnet-parachains-constants", + "westend-runtime-constants", + "xcm-runtime-apis", +] + +[[package]] +name = "pallet-staking-async-rc-client" +version = "0.1.0" +dependencies = [ + "frame-support", + "frame-system", + "impl-trait-for-tuples", + "log", + "parity-scale-codec", + "scale-info", + "sp-core 28.0.0", + "sp-runtime 31.0.1", + "sp-staking", +] + +[[package]] +name = "pallet-staking-async-rc-runtime" +version = "7.0.0" +dependencies = [ + "approx", + "binary-merkle-tree", + "bitvec", + "frame-benchmarking", + "frame-election-provider-support", + "frame-executive", + "frame-metadata-hash-extension", + "frame-remote-externalities", + "frame-support", + "frame-system", + "frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "frame-try-runtime", + "hex-literal", + "log", + "pallet-asset-rate", + "pallet-authority-discovery", + "pallet-authorship", + "pallet-babe", + "pallet-bags-list", + "pallet-balances", + "pallet-beefy", + "pallet-beefy-mmr", + "pallet-conviction-voting", + "pallet-delegated-staking", + "pallet-election-provider-multi-phase", + "pallet-election-provider-support-benchmarking", + "pallet-elections-phragmen", + "pallet-fast-unstake", + "pallet-grandpa", + "pallet-identity", + "pallet-indices", + "pallet-membership", + "pallet-message-queue", + "pallet-migrations", + "pallet-mmr", + "pallet-multisig", + "pallet-nomination-pools", + "pallet-nomination-pools-benchmarking", + "pallet-nomination-pools-runtime-api", + "pallet-offences", + "pallet-offences-benchmarking", + "pallet-parameters", + "pallet-preimage", + "pallet-proxy", + "pallet-recovery", + "pallet-referenda", + "pallet-root-testing", + "pallet-scheduler", + "pallet-session", + "pallet-session-benchmarking", + "pallet-society", + "pallet-staking", + "pallet-staking-async-ah-client", + "pallet-staking-async-rc-client", + 
"pallet-staking-async-rc-runtime-constants", + "pallet-state-trie-migration", + "pallet-sudo", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "pallet-treasury", + "pallet-utility", + "pallet-vesting", + "pallet-whitelist", + "pallet-xcm", + "pallet-xcm-benchmarks", + "parity-scale-codec", + "polkadot-parachain-primitives", + "polkadot-primitives", + "polkadot-runtime-common", + "polkadot-runtime-parachains", + "scale-info", + "serde", + "serde_derive", + "serde_json", + "smallvec", + "sp-api 26.0.0", + "sp-application-crypto 30.0.0", + "sp-arithmetic 23.0.0", + "sp-authority-discovery", + "sp-block-builder", + "sp-consensus-babe", + "sp-consensus-beefy", + "sp-consensus-grandpa", + "sp-core 28.0.0", + "sp-genesis-builder", + "sp-inherents", + "sp-io 30.0.0", + "sp-keyring", + "sp-mmr-primitives", + "sp-npos-elections", + "sp-offchain", + "sp-runtime 31.0.1", + "sp-session", + "sp-staking", + "sp-storage 19.0.0", + "sp-tracing 16.0.0", + "sp-transaction-pool", + "sp-version 29.0.0", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", + "substrate-wasm-builder", + "tiny-keccak", + "tokio", + "xcm-runtime-apis", +] + +[[package]] +name = "pallet-staking-async-rc-runtime-constants" +version = "7.0.0" +dependencies = [ + "frame-support", + "polkadot-primitives", + "polkadot-runtime-common", + "smallvec", + "sp-core 28.0.0", + "sp-runtime 31.0.1", + "sp-weights 27.0.0", + "staging-xcm", + "staging-xcm-builder", +] + +[[package]] +name = "pallet-staking-async-reward-fn" +version = "19.0.0" +dependencies = [ + "log", + "sp-arithmetic 23.0.0", +] + +[[package]] +name = "pallet-staking-async-runtime-api" +version = "14.0.0" +dependencies = [ + "parity-scale-codec", + "sp-api 26.0.0", + "sp-staking", +] + [[package]] name = "pallet-staking-reward-curve" version = "11.0.0" @@ -15337,6 +15708,7 @@ dependencies = [ "bitflags 1.3.2", "bitvec", "frame-benchmarking", + "frame-election-provider-support", 
"frame-support", "frame-support-test", "frame-system", @@ -15496,6 +15868,7 @@ dependencies = [ "pallet-delegated-staking", "pallet-democracy", "pallet-dev-mode", + "pallet-election-provider-multi-block", "pallet-election-provider-multi-phase", "pallet-election-provider-support-benchmarking", "pallet-elections-phragmen", @@ -15548,6 +15921,11 @@ dependencies = [ "pallet-skip-feeless-payment", "pallet-society", "pallet-staking", + "pallet-staking-async", + "pallet-staking-async-ah-client", + "pallet-staking-async-rc-client", + "pallet-staking-async-reward-fn", + "pallet-staking-async-runtime-api", "pallet-staking-reward-curve", "pallet-staking-reward-fn", "pallet-staking-runtime-api", @@ -26913,6 +27291,8 @@ dependencies = [ "pallet-session", "pallet-session-benchmarking", "pallet-staking", + "pallet-staking-async-ah-client", + "pallet-staking-async-rc-client", "pallet-staking-runtime-api", "pallet-sudo", "pallet-timestamp", diff --git a/Cargo.toml b/Cargo.toml index 8b4c871c1b97e..0f9d90c8eed2d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -352,6 +352,7 @@ members = [ "substrate/frame/core-fellowship", "substrate/frame/delegated-staking", "substrate/frame/democracy", + "substrate/frame/election-provider-multi-block", "substrate/frame/election-provider-multi-phase", "substrate/frame/election-provider-multi-phase/test-staking-e2e", "substrate/frame/election-provider-support", @@ -427,6 +428,15 @@ members = [ "substrate/frame/session/benchmarking", "substrate/frame/society", "substrate/frame/staking", + "substrate/frame/staking-async", + "substrate/frame/staking-async/ah-client", + "substrate/frame/staking-async/ahm-test", + "substrate/frame/staking-async/rc-client", + "substrate/frame/staking-async/reward-fn", + "substrate/frame/staking-async/runtime-api", + "substrate/frame/staking-async/runtimes/parachain", + "substrate/frame/staking-async/runtimes/rc", + "substrate/frame/staking-async/runtimes/rc/constants", "substrate/frame/staking/reward-curve", 
"substrate/frame/staking/reward-fn", "substrate/frame/staking/runtime-api", @@ -945,6 +955,7 @@ pallet-default-config-example = { path = "substrate/frame/examples/default-confi pallet-delegated-staking = { path = "substrate/frame/delegated-staking", default-features = false } pallet-democracy = { path = "substrate/frame/democracy", default-features = false } pallet-dev-mode = { path = "substrate/frame/examples/dev-mode", default-features = false } +pallet-election-provider-multi-block = { path = "substrate/frame/election-provider-multi-block", default-features = false } pallet-election-provider-multi-phase = { path = "substrate/frame/election-provider-multi-phase", default-features = false } pallet-election-provider-support-benchmarking = { path = "substrate/frame/election-provider-support/benchmarking", default-features = false } pallet-elections-phragmen = { path = "substrate/frame/elections-phragmen", default-features = false } @@ -1014,6 +1025,16 @@ pallet-staking = { path = "substrate/frame/staking", default-features = false } pallet-staking-reward-curve = { path = "substrate/frame/staking/reward-curve", default-features = false } pallet-staking-reward-fn = { path = "substrate/frame/staking/reward-fn", default-features = false } pallet-staking-runtime-api = { path = "substrate/frame/staking/runtime-api", default-features = false } +# TODO: remove the reward stuff as they are not needed here +pallet-staking-async = { path = "substrate/frame/staking-async", default-features = false } +pallet-staking-async-ah-client = { path = "substrate/frame/staking-async/ah-client", default-features = false } +pallet-staking-async-parachain-runtime = { path = "substrate/frame/staking-async/runtimes/parachain" } +pallet-staking-async-rc-client = { path = "substrate/frame/staking-async/rc-client", default-features = false } +pallet-staking-async-rc-runtime = { path = "substrate/frame/staking-async/runtimes/rc" } +pallet-staking-async-rc-runtime-constants = { path = 
"substrate/frame/staking-async/runtimes/rc/constants", default-features = false } +pallet-staking-async-reward-curve = { path = "substrate/frame/staking-async/reward-curve", default-features = false } +pallet-staking-async-reward-fn = { path = "substrate/frame/staking-async/reward-fn", default-features = false } +pallet-staking-async-runtime-api = { path = "substrate/frame/staking-async/runtime-api", default-features = false } pallet-state-trie-migration = { path = "substrate/frame/state-trie-migration", default-features = false } pallet-statement = { default-features = false, path = "substrate/frame/statement" } pallet-sudo = { path = "substrate/frame/sudo", default-features = false } diff --git a/cumulus/pallets/collator-selection/src/mock.rs b/cumulus/pallets/collator-selection/src/mock.rs index 002baea02d60d..664ba56459ae9 100644 --- a/cumulus/pallets/collator-selection/src/mock.rs +++ b/cumulus/pallets/collator-selection/src/mock.rs @@ -117,7 +117,6 @@ impl pallet_session::SessionHandler for TestSessionHandler { } fn on_new_session(_: bool, keys: &[(u64, Ks)], _: &[(u64, Ks)]) { SessionChangeBlock::set(System::block_number()); - dbg!(keys.len()); SessionHandlerCollators::set(keys.iter().map(|(a, _)| *a).collect::>()) } fn on_before_session_ending() {} diff --git a/docs/sdk/src/guides/your_first_node.rs b/docs/sdk/src/guides/your_first_node.rs index e289734d3df8b..476c8ccbf35d4 100644 --- a/docs/sdk/src/guides/your_first_node.rs +++ b/docs/sdk/src/guides/your_first_node.rs @@ -335,7 +335,7 @@ mod tests { .output() .unwrap(); - // atleast blocks should be imported + // at least blocks should be imported assert!(String::from_utf8(output.stderr) .unwrap() .contains(format!("Imported #{}", 7).to_string().as_str())); diff --git a/polkadot/node/primitives/src/lib.rs b/polkadot/node/primitives/src/lib.rs index 3df352733b5f1..40a62837dedae 100644 --- a/polkadot/node/primitives/src/lib.rs +++ b/polkadot/node/primitives/src/lib.rs @@ -73,7 +73,7 @@ const 
MERKLE_PROOF_MAX_DEPTH: usize = 8; /// The bomb limit for decompressing code blobs. #[deprecated( - note = "`VALIDATION_CODE_BOMB_LIMIT` will be removed. Use `validation_code_bomb_limit` + note = "`VALIDATION_CODE_BOMB_LIMIT` will be removed. Use `validation_code_bomb_limit` runtime API to retrieve the value from the runtime" )] pub const VALIDATION_CODE_BOMB_LIMIT: usize = (MAX_CODE_SIZE * 4u32) as usize; diff --git a/polkadot/parachain/src/primitives.rs b/polkadot/parachain/src/primitives.rs index 73844d3c76fd2..73d060c1adaca 100644 --- a/polkadot/parachain/src/primitives.rs +++ b/polkadot/parachain/src/primitives.rs @@ -163,7 +163,7 @@ pub struct BlockData(#[cfg_attr(feature = "std", serde(with = "bytes"))] pub Vec Ord, PartialEq, PartialOrd, - RuntimeDebug, + Debug, serde::Serialize, serde::Deserialize, TypeInfo, diff --git a/polkadot/runtime/parachains/Cargo.toml b/polkadot/runtime/parachains/Cargo.toml index 05b46062d28d9..5cf22bbd38c38 100644 --- a/polkadot/runtime/parachains/Cargo.toml +++ b/polkadot/runtime/parachains/Cargo.toml @@ -47,6 +47,9 @@ pallet-session = { workspace = true } pallet-staking = { workspace = true } pallet-timestamp = { workspace = true } +# only used in benchmarking +frame-election-provider-support = { workspace = true, optional = true } + polkadot-primitives = { workspace = true } xcm = { workspace = true } xcm-executor = { workspace = true } @@ -80,6 +83,7 @@ std = [ "bitvec/std", "codec/std", "frame-benchmarking?/std", + "frame-election-provider-support?/std", "frame-support/std", "frame-system/std", "log/std", @@ -118,6 +122,7 @@ std = [ ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", + "frame-election-provider-support/runtime-benchmarks", "frame-support-test/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", @@ -139,6 +144,7 @@ runtime-benchmarks = [ "xcm/runtime-benchmarks", ] try-runtime = [ + "frame-election-provider-support?/try-runtime", 
"frame-support-test/try-runtime", "frame-support/try-runtime", "frame-system/try-runtime", diff --git a/polkadot/runtime/parachains/src/disputes/slashing/benchmarking.rs b/polkadot/runtime/parachains/src/disputes/slashing/benchmarking.rs index bfd46d7524385..1d1c624f43444 100644 --- a/polkadot/runtime/parachains/src/disputes/slashing/benchmarking.rs +++ b/polkadot/runtime/parachains/src/disputes/slashing/benchmarking.rs @@ -82,6 +82,11 @@ where pallet_session::Pallet::::on_initialize(BlockNumberFor::::one()); initializer::Pallet::::on_initialize(BlockNumberFor::::one()); + + // signal to `pallet-staking`'s `ElectionProvider` to be ready asap. + use frame_election_provider_support::ElectionProvider; + <::ElectionProvider as ElectionProvider>::asap(); + // skip sessions until the new validator set is enacted while pallet_session::Pallet::::validators().len() < n as usize { pallet_session::Pallet::::rotate_session(); diff --git a/polkadot/runtime/parachains/src/origin.rs b/polkadot/runtime/parachains/src/origin.rs index a99c56e78968a..88318aa3dfc21 100644 --- a/polkadot/runtime/parachains/src/origin.rs +++ b/polkadot/runtime/parachains/src/origin.rs @@ -59,7 +59,7 @@ pub mod pallet { Encode, Decode, DecodeWithMemTracking, - sp_core::RuntimeDebug, + Debug, scale_info::TypeInfo, MaxEncodedLen, )] diff --git a/polkadot/runtime/parachains/src/reward_points.rs b/polkadot/runtime/parachains/src/reward_points.rs index 69ef2db756c21..789201aad9f34 100644 --- a/polkadot/runtime/parachains/src/reward_points.rs +++ b/polkadot/runtime/parachains/src/reward_points.rs @@ -23,7 +23,7 @@ use crate::{session_info, shared}; use alloc::collections::btree_set::BTreeSet; -use frame_support::traits::{Defensive, ValidatorSet}; +use frame_support::traits::{Defensive, RewardsReporter, ValidatorSet}; use polkadot_primitives::{SessionIndex, ValidatorIndex}; /// The amount of era points given by backing a candidate that is included. 
@@ -32,12 +32,13 @@ pub const BACKING_POINTS: u32 = 20; pub const DISPUTE_STATEMENT_POINTS: u32 = 20; /// Rewards validators for participating in parachains with era points in pallet-staking. -pub struct RewardValidatorsWithEraPoints(core::marker::PhantomData); +pub struct RewardValidatorsWithEraPoints(core::marker::PhantomData<(C, R)>); -impl RewardValidatorsWithEraPoints +impl RewardValidatorsWithEraPoints where - C: pallet_staking::Config + session_info::Config, + C: session_info::Config, C::ValidatorSet: ValidatorSet, + R: RewardsReporter, { /// Reward validators in session with points, but only if they are in the active set. fn reward_only_active( @@ -61,14 +62,15 @@ where .filter(|v| active_set.contains(v)) .map(|v| (v, points)); - >::reward_by_ids(rewards); + R::reward_by_ids(rewards); } } -impl crate::inclusion::RewardValidators for RewardValidatorsWithEraPoints +impl crate::inclusion::RewardValidators for RewardValidatorsWithEraPoints where - C: pallet_staking::Config + shared::Config + session_info::Config, + C: shared::Config + session_info::Config, C::ValidatorSet: ValidatorSet, + R: RewardsReporter, { fn reward_backing(indices: impl IntoIterator) { let session_index = shared::CurrentSessionIndex::::get(); @@ -78,10 +80,11 @@ where fn reward_bitfields(_validators: impl IntoIterator) {} } -impl crate::disputes::RewardValidators for RewardValidatorsWithEraPoints +impl crate::disputes::RewardValidators for RewardValidatorsWithEraPoints where - C: pallet_staking::Config + session_info::Config, + C: session_info::Config, C::ValidatorSet: ValidatorSet, + R: RewardsReporter, { fn reward_dispute_statement( session: SessionIndex, diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index ba33cc9ecfa0d..950723d451e2b 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -494,6 +494,7 @@ impl sp_runtime::traits::Convert> for FullIdentificationOf } impl pallet_session::historical::Config for 
Runtime { + type RuntimeEvent = RuntimeEvent; type FullIdentification = (); type FullIdentificationOf = FullIdentificationOf; } diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index 575b64aab85de..809cead55f2d7 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -79,7 +79,7 @@ use polkadot_runtime_common::{ use polkadot_runtime_parachains::reward_points::RewardValidatorsWithEraPoints; use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; use sp_consensus_beefy::ecdsa_crypto::{AuthorityId as BeefyId, Signature as BeefySignature}; -use sp_core::{ConstU32, OpaqueMetadata}; +use sp_core::{ConstBool, ConstU32, OpaqueMetadata}; use sp_mmr_primitives as mmr; use sp_runtime::{ curve::PiecewiseLinear, @@ -312,7 +312,7 @@ impl_opaque_keys! { impl pallet_session::Config for Runtime { type RuntimeEvent = RuntimeEvent; type ValidatorId = AccountId; - type ValidatorIdOf = pallet_staking::StashOf; + type ValidatorIdOf = sp_runtime::traits::ConvertInto; type ShouldEndSession = Babe; type NextSessionRotation = Babe; type SessionManager = Staking; @@ -323,8 +323,9 @@ impl pallet_session::Config for Runtime { } impl pallet_session::historical::Config for Runtime { - type FullIdentification = pallet_staking::Existence; - type FullIdentificationOf = pallet_staking::ExistenceOf; + type RuntimeEvent = RuntimeEvent; + type FullIdentification = (); + type FullIdentificationOf = pallet_staking::UnitIdentificationOf; } pallet_staking_reward_curve::build! { @@ -349,7 +350,7 @@ parameter_types! { pub const MaxExposurePageSize: u32 = 64; pub const MaxNominators: u32 = 256; pub const MaxAuthorities: u32 = 100_000; - pub const OnChainMaxWinners: u32 = u32::MAX; + pub const OnChainMaxWinners: u32 = MaxAuthorities::get(); // Unbounded number of election targets and voters. 
pub ElectionBoundsOnChain: ElectionBounds = ElectionBoundsBuilder::default().build(); } @@ -362,7 +363,9 @@ impl onchain::Config for OnChainSeqPhragmen { type DataProvider = Staking; type WeightInfo = (); type Bounds = ElectionBoundsOnChain; - type MaxWinners = OnChainMaxWinners; + type MaxWinnersPerPage = OnChainMaxWinners; + type MaxBackersPerWinner = ConstU32<{ u32::MAX }>; + type Sort = ConstBool; } /// Upper limit on the number of NPOS nominations. @@ -400,6 +403,7 @@ impl pallet_staking::Config for Runtime { type BenchmarkingConfig = polkadot_runtime_common::StakingBenchmarkingConfig; type EventListeners = (); type WeightInfo = (); + type MaxValidatorSet = MaxAuthorities; type Filter = frame_support::traits::Nothing; } @@ -530,7 +534,7 @@ impl parachains_shared::Config for Runtime { impl parachains_inclusion::Config for Runtime { type RuntimeEvent = RuntimeEvent; type DisputesHandler = ParasDisputes; - type RewardValidators = RewardValidatorsWithEraPoints; + type RewardValidators = RewardValidatorsWithEraPoints; type MessageQueue = (); type WeightInfo = (); } diff --git a/polkadot/runtime/westend/Cargo.toml b/polkadot/runtime/westend/Cargo.toml index a29fb95442ec4..7c532db80d557 100644 --- a/polkadot/runtime/westend/Cargo.toml +++ b/polkadot/runtime/westend/Cargo.toml @@ -83,6 +83,8 @@ pallet-root-testing = { workspace = true } pallet-scheduler = { workspace = true } pallet-session = { workspace = true } pallet-staking = { workspace = true } +pallet-staking-async-ah-client = { workspace = true } +pallet-staking-async-rc-client = { workspace = true } pallet-staking-runtime-api = { workspace = true } pallet-sudo = { workspace = true } pallet-timestamp = { workspace = true } @@ -180,6 +182,8 @@ std = [ "pallet-scheduler/std", "pallet-session-benchmarking?/std", "pallet-session/std", + "pallet-staking-async-ah-client/std", + "pallet-staking-async-rc-client/std", "pallet-staking-runtime-api/std", "pallet-staking/std", "pallet-sudo/std", @@ -264,6 +268,8 @@ 
runtime-benchmarks = [ "pallet-referenda/runtime-benchmarks", "pallet-scheduler/runtime-benchmarks", "pallet-session-benchmarking/runtime-benchmarks", + "pallet-staking-async-ah-client/runtime-benchmarks", + "pallet-staking-async-rc-client/runtime-benchmarks", "pallet-staking/runtime-benchmarks", "pallet-sudo/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", @@ -323,6 +329,8 @@ try-runtime = [ "pallet-root-testing/try-runtime", "pallet-scheduler/try-runtime", "pallet-session/try-runtime", + "pallet-staking-async-ah-client/try-runtime", + "pallet-staking-async-rc-client/try-runtime", "pallet-staking/try-runtime", "pallet-sudo/try-runtime", "pallet-timestamp/try-runtime", diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 80d3fa139bbe8..8e8490885766e 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -33,6 +33,7 @@ use frame_support::{ derive_impl, dynamic_params::{dynamic_pallet_params, dynamic_params}, genesis_builder_helper::{build_state, get_preset}, + pallet_prelude::PhantomData, parameter_types, traits::{ fungible::HoldConsideration, tokens::UnityOrOuterConversion, ConstU32, Contains, EitherOf, @@ -48,6 +49,9 @@ use pallet_grandpa::{fg_primitives, AuthorityId as GrandpaId}; use pallet_identity::legacy::IdentityInfo; use pallet_nomination_pools::PoolId; use pallet_session::historical as session_historical; +use pallet_staking::UseValidatorsMap; +use pallet_staking_async_ah_client as ah_client; +use pallet_staking_async_rc_client as rc_client; use pallet_transaction_payment::{FeeDetails, FungibleAdapter, RuntimeDispatchInfo}; use polkadot_primitives::{ slashing, @@ -96,7 +100,7 @@ use sp_consensus_beefy::{ ecdsa_crypto::{AuthorityId as BeefyId, Signature as BeefySignature}, mmr::{BeefyDataProvider, MmrLeafVersion}, }; -use sp_core::{ConstU8, OpaqueMetadata, RuntimeDebug, H256}; +use sp_core::{ConstBool, ConstU8, OpaqueMetadata, RuntimeDebug, H256}; use sp_runtime::{ 
generic, impl_opaque_keys, traits::{ @@ -106,7 +110,8 @@ use sp_runtime::{ transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity}, ApplyExtrinsicResult, FixedU128, KeyTypeId, MultiSignature, MultiSigner, Percent, Permill, }; -use sp_staking::SessionIndex; +use sp_staking::{EraIndex, SessionIndex}; + #[cfg(any(feature = "std", test))] use sp_version::NativeVersion; use sp_version::RuntimeVersion; @@ -124,7 +129,6 @@ use xcm_runtime_apis::{ pub use frame_system::Call as SystemCall; pub use pallet_balances::Call as BalancesCall; pub use pallet_election_provider_multi_phase::{Call as EPMCall, GeometricDepositBase}; -use pallet_staking::UseValidatorsMap; pub use pallet_timestamp::Call as TimestampCall; use sp_runtime::traits::Get; #[cfg(any(feature = "std", test))] @@ -134,7 +138,7 @@ pub use sp_runtime::BuildStorage; use westend_runtime_constants::{ currency::*, fee::*, - system_parachain::{coretime::TIMESLICE_PERIOD, BROKER_ID}, + system_parachain::{coretime::TIMESLICE_PERIOD, ASSET_HUB_ID, BROKER_ID}, time::*, }; @@ -504,7 +508,7 @@ impl pallet_timestamp::Config for Runtime { impl pallet_authorship::Config for Runtime { type FindAuthor = pallet_session::FindAccountFromAuthorIndex; - type EventHandler = Staking; + type EventHandler = AssetHubStakingClient; } parameter_types! { @@ -526,10 +530,10 @@ impl_opaque_keys! 
{ impl pallet_session::Config for Runtime { type RuntimeEvent = RuntimeEvent; type ValidatorId = AccountId; - type ValidatorIdOf = pallet_staking::StashOf; + type ValidatorIdOf = ConvertInto; type ShouldEndSession = Babe; type NextSessionRotation = Babe; - type SessionManager = pallet_session::historical::NoteHistoricalRoot; + type SessionManager = session_historical::NoteHistoricalRoot; type SessionHandler = ::KeyTypeIdProviders; type Keys = SessionKeys; type DisablingStrategy = pallet_session::disabling::UpToLimitWithReEnablingDisablingStrategy; @@ -537,8 +541,9 @@ impl pallet_session::Config for Runtime { } impl pallet_session::historical::Config for Runtime { - type FullIdentification = pallet_staking::ExistenceOrLegacyExposure; - type FullIdentificationOf = pallet_staking::ExistenceOrLegacyExposureOf; + type RuntimeEvent = RuntimeEvent; + type FullIdentification = sp_staking::Exposure; + type FullIdentificationOf = pallet_staking::DefaultExposureOf; } pub struct MaybeSignedPhase; @@ -586,7 +591,10 @@ parameter_types! { ElectionBoundsBuilder::default().voters_count(MaxElectingVoters::get().into()).build(); // Maximum winners that can be chosen as active validators pub const MaxActiveValidators: u32 = 1000; - + // One page only, fill the whole page with the `MaxActiveValidators`. + pub const MaxWinnersPerPage: u32 = MaxActiveValidators::get(); + // Unbonded, thus the max backers per winner maps to the max electing voters limit. 
+ pub const MaxBackersPerWinner: u32 = MaxElectingVoters::get(); } frame_election_provider_support::generate_solution_type!( @@ -601,12 +609,14 @@ frame_election_provider_support::generate_solution_type!( pub struct OnChainSeqPhragmen; impl onchain::Config for OnChainSeqPhragmen { + type Sort = ConstBool; type System = Runtime; type Solver = SequentialPhragmen; type DataProvider = Staking; type WeightInfo = weights::frame_election_provider_support::WeightInfo; - type MaxWinners = MaxActiveValidators; type Bounds = ElectionBounds; + type MaxBackersPerWinner = MaxBackersPerWinner; + type MaxWinnersPerPage = MaxWinnersPerPage; } impl pallet_election_provider_multi_phase::MinerConfig for Runtime { @@ -619,7 +629,8 @@ impl pallet_election_provider_multi_phase::MinerConfig for Runtime { as frame_election_provider_support::ElectionDataProvider >::MaxVotesPerVoter; - type MaxWinners = MaxActiveValidators; + type MaxBackersPerWinner = MaxBackersPerWinner; + type MaxWinners = MaxWinnersPerPage; // The unsigned submissions have to respect the weight of the submit_unsigned call, thus their // weight estimate function is wired to this call's weight. 
@@ -653,6 +664,8 @@ impl pallet_election_provider_multi_phase::Config for Runtime { type BetterSignedThreshold = (); type OffchainRepeat = OffchainRepeat; type MinerTxPriority = NposSolutionPriority; + type MaxWinners = MaxWinnersPerPage; + type MaxBackersPerWinner = MaxBackersPerWinner; type DataProvider = Staking; #[cfg(any(feature = "fast-runtime", feature = "runtime-benchmarks"))] type Fallback = onchain::OnChainExecution; @@ -661,7 +674,8 @@ impl pallet_election_provider_multi_phase::Config for Runtime { AccountId, BlockNumber, Staking, - MaxActiveValidators, + MaxWinnersPerPage, + MaxBackersPerWinner, )>; type GovernanceFallback = onchain::OnChainExecution; type Solver = SequentialPhragmen< @@ -672,7 +686,6 @@ impl pallet_election_provider_multi_phase::Config for Runtime { type BenchmarkingConfig = polkadot_runtime_common::elections::BenchmarkConfig; type ForceOrigin = EnsureRoot; type WeightInfo = weights::pallet_election_provider_multi_phase::WeightInfo; - type MaxWinners = MaxActiveValidators; type ElectionBounds = ElectionBounds; } @@ -719,9 +732,9 @@ parameter_types! { // Six sessions in an era (6 hours). pub const SessionsPerEra: SessionIndex = prod_or_fast!(6, 1); // 2 eras for unbonding (12 hours). - pub const BondingDuration: sp_staking::EraIndex = 2; + pub const BondingDuration: EraIndex = 2; // 1 era in which slashes can be cancelled (6 hours). - pub const SlashDeferDuration: sp_staking::EraIndex = 1; + pub const SlashDeferDuration: EraIndex = 1; pub const MaxExposurePageSize: u32 = 64; // Note: this is not really correct as Max Nominators is (MaxExposurePageSize * page_count) but // this is an unbounded number. 
We just set it to a reasonably high value, 1 full page @@ -754,6 +767,7 @@ impl pallet_staking::Config for Runtime { type GenesisElectionProvider = onchain::OnChainExecution; type VoterList = VoterList; type TargetList = UseValidatorsMap; + type MaxValidatorSet = MaxActiveValidators; type NominationsQuota = pallet_staking::FixedNominationsQuota<{ MaxNominations::get() }>; type MaxUnlockingChunks = frame_support::traits::ConstU32<32>; type HistoryDepth = frame_support::traits::ConstU32<84>; @@ -761,9 +775,114 @@ impl pallet_staking::Config for Runtime { type BenchmarkingConfig = polkadot_runtime_common::StakingBenchmarkingConfig; type EventListeners = (NominationPools, DelegatedStaking); type WeightInfo = weights::pallet_staking::WeightInfo; + // TODO: Set this to everything once AHM migration starts. type Filter = Nothing; } +#[derive(Encode, Decode)] +enum AssetHubRuntimePallets { + // TODO - AHM: check index + #[codec(index = 89)] + RcClient(RcClientCalls), +} + +#[derive(Encode, Decode)] +enum RcClientCalls { + // TODO - AHM: check index + #[codec(index = 0)] + RelaySessionReport(rc_client::SessionReport), + // TODO - AHM: check index + #[codec(index = 1)] + RelayNewOffence(SessionIndex, Vec>), +} + +pub struct AssetHubLocation; +impl Get for AssetHubLocation { + fn get() -> Location { + Location::new(0, [Junction::Parachain(ASSET_HUB_ID)]) + } +} + +pub struct XcmToAssetHub(PhantomData); +impl ah_client::SendToAssetHub for XcmToAssetHub { + type AccountId = AccountId; + + fn relay_session_report(session_report: rc_client::SessionReport) { + let message = Xcm(vec![ + Instruction::UnpaidExecution { + weight_limit: WeightLimit::Unlimited, + check_origin: None, + }, + Self::mk_asset_hub_call(RcClientCalls::RelaySessionReport(session_report)), + ]); + if let Err(err) = send_xcm::(AssetHubLocation::get(), message) { + log::error!(target: "runtime", "Failed to send relay session report message: {:?}", err); + } + } + + fn relay_new_offence( + session_index: 
SessionIndex, + offences: Vec>, + ) { + let message = Xcm(vec![ + Instruction::UnpaidExecution { + weight_limit: WeightLimit::Unlimited, + check_origin: None, + }, + Self::mk_asset_hub_call(RcClientCalls::RelayNewOffence(session_index, offences)), + ]); + if let Err(err) = send_xcm::(AssetHubLocation::get(), message) { + log::error!(target: "runtime", "Failed to send relay offence message: {:?}", err); + } + } +} + +impl XcmToAssetHub { + fn mk_asset_hub_call( + call: RcClientCalls<::AccountId>, + ) -> Instruction<()> { + Instruction::Transact { + origin_kind: OriginKind::Superuser, + fallback_max_weight: None, + call: AssetHubRuntimePallets::RcClient(call).encode().into(), + } + } +} + +pub struct EnsureAssetHub; +impl frame_support::traits::EnsureOrigin for EnsureAssetHub { + type Success = (); + fn try_origin(o: RuntimeOrigin) -> Result { + match >>::into( + o.clone(), + ) { + Ok(parachains_origin::Origin::Parachain(id)) if id == ASSET_HUB_ID.into() => Ok(()), + _ => Err(o), + } + } + + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin() -> Result { + Ok(RuntimeOrigin::root()) + } +} + +// TODO - AHM: this pallet is currently in place, but does nothing. Upon AHM, it should become +// activated. Note that it is used as `SessionManager`, but since its mode is `Passive`, it will +// delegate all of its tasks to `Fallback`, which is again `Staking`. 
+impl ah_client::Config for Runtime { + type CurrencyBalance = Balance; + type AssetHubOrigin = + frame_support::traits::EitherOfDiverse, EnsureAssetHub>; + type AdminOrigin = EnsureRoot; + type SessionInterface = Self; + type SendToAssetHub = XcmToAssetHub; + type MinimumValidatorSetSize = ConstU32<4>; + type UnixTime = Timestamp; + type PointsPerBlock = ConstU32<20>; + type Fallback = Staking; +} + impl pallet_fast_unstake::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Currency = Balances; @@ -837,8 +956,8 @@ impl pallet_treasury::Config for Runtime { impl pallet_offences::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type IdentificationTuple = pallet_session::historical::IdentificationTuple; - type OnOffenceHandler = Staking; + type IdentificationTuple = session_historical::IdentificationTuple; + type OnOffenceHandler = AssetHubStakingClient; } impl pallet_authority_discovery::Config for Runtime { @@ -1244,7 +1363,8 @@ impl parachains_session_info::Config for Runtime { impl parachains_inclusion::Config for Runtime { type RuntimeEvent = RuntimeEvent; type DisputesHandler = ParasDisputes; - type RewardValidators = parachains_reward_points::RewardValidatorsWithEraPoints; + type RewardValidators = + parachains_reward_points::RewardValidatorsWithEraPoints; type MessageQueue = MessageQueue; type WeightInfo = weights::polkadot_runtime_parachains_inclusion::WeightInfo; } @@ -1416,7 +1536,8 @@ impl assigned_slots::Config for Runtime { impl parachains_disputes::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type RewardValidators = parachains_reward_points::RewardValidatorsWithEraPoints; + type RewardValidators = + parachains_reward_points::RewardValidatorsWithEraPoints; type SlashingHandler = parachains_slashing::SlashValidatorsForDisputes; type WeightInfo = weights::polkadot_runtime_parachains_disputes::WeightInfo; } @@ -1806,6 +1927,8 @@ mod runtime { pub type AssignedSlots = assigned_slots; #[runtime::pallet_index(66)] pub type 
 Coretime = coretime; + #[runtime::pallet_index(67)] + pub type AssetHubStakingClient = pallet_staking_async_ah_client; // Migrations pallet #[runtime::pallet_index(98)] diff --git a/prdoc/pr_8127.prdoc b/prdoc/pr_8127.prdoc new file mode 100644 index 0000000000000..59688f2a24404 --- /dev/null +++ b/prdoc/pr_8127.prdoc @@ -0,0 +1,184 @@ +title: '[AHM] Async Staking module across AH and RC' +doc: +- audience: Runtime Dev + description: |- + This PR is the final outcome of a multi-month development period, with a lot of background work + since 2022. Its main aim is to make pallet-staking, alongside its `type ElectionProvider` + compatible to be used in a parachain, and report back the validator set to a relay-chain. + + This setup is intended to be used for Polkadot, Kusama and Westend relay-chains, with the + corresponding AssetHubs hosting the staking system. + + While this PR is quite big, a lot of the diffs are due to adding a relay and parachain runtime + for testing. The following is a guide to help reviewers/auditors distinguish what has actually + changed in this PR. + + ## Added + + This PR adds the following new pallets, all of which are not used anywhere yet, with the + exception of one (see `westend-runtime` changes below). + + #### `pallet-election-provider-multi-block` + + This is a set of 4 pallets, capable of implementing an async, multi-page `ElectionProvider`. + This pallet is not used in any real runtime yet, and is intended to be used in `AssetHub`, next + to `pallet-staking-async`. + + #### `pallet-staking-async` + + A fork of the old `pallet-staking`, with a number of key differences, making it suitable to be + used in a parachain: + + 1. It no longer has access to a secure timestamp, previously used to calculate the duration of + an era. 2. It no longer has access to a `pallet-session`. 3. It no longer has access to a + `pallet-authorship`. 4. It is capable of working with a multi-page `ElectionProvider`, aka. 
+ `pallet-election-provider-multi-block`. + + To compensate for the above, this pallet relies on XCM messages coming from the relay-chain, + informing the pallet of: + + * When a new era should be activated, and how long its duration was + * When an offence has happened on the relay-chain + * When a session ends on the relay-chain, and how many reward points were accumulated for each + validator during that period. + + #### `pallet-staking-async-ah-client` and `pallet-staking-async-rc-client` + + Are the two new pallets that facilitate the above communication. + + #### `pallet-ahm-test` + + A test-only crate that contains e2e rust-based unit tests for all of the above. + + #### `pallet-staking-async-rc-runtime` and `pallet-staking-async-parachain-runtime` + + Forks of westend and westend-asset-hub, customized to be used for testing all of the above with + Zombienet. It contains a lot of unrelated code as well. + + ## Changed + + #### `Identification` + + This mechanism, which lives on the relay-chain, is expressed by `type FullIdentification` and `type FullIdentificationOf` in runtimes. It is a way to identify the full data needed to slash a validator. Historically, it was pointing to a validator, and their `struct Exposure`. With the move to Asset-Hub, this is no longer possible for two reasons: + + 1. Relay chain no longer knows the full exposures + 2. Even if it did, the full exposures are getting bigger and bigger and relaying the entirety of them is not scalable. + + Instead, runtimes now move to a new `type FullIdentificationOf = DefaultExposureOf`, which will identify a validator with an `Exposure::default()`. This is suboptimal, as it forces us to still store a number of bytes. Yet, it allows any old `FullIdentification`, pertaining to an old slash, to be decoded. This compromise is only needed to cater for slashes that happen around the time of AHM. 
+ + #### `westend-runtime` + + This runtime already has the `pallet-staking-async-ah-client`, integrated into all the places such that: + + 1. It handles the validator reward points + 2. It handles offences + 3. It is the `SessionManager` + + Yet, it is delegating all of the above to its `type Fallback`, which is the old `pallet-staking`. This is a preparatory step for AHM, and should not introduce any logical change. + + #### `pallet-election-provider-multi-phase` + + This is the old single-page `ElectionProvider`. It has been updated to work with multi-page traits, yet it only supports `page-size = 1` for now. It should not have seen any logical changes. + + + #### `pallet-bags-list` + + Now has two new features. 1. It can be `Locked`, in which case all updates to it fail with an + `Err(_)`, even deletion of a node. This is needed because we cannot alter any nodes in this + pallet during a multi-page iteration, aka. multi-page snapshot. 2. To combat this, the same + `rebag` transaction can also be used to remove a node from the list, or add a node to the + list. This is done through the `score_of` api. + + See the file changes and tests under `./substrate/frame/bags-list` for more info. + + #### RuntimeDebug -> Debug + + To facilitate debugging, a number of types' `RuntimeDebug` impl has been changed to `Debug`. 
See https://github.com/paritytech/polkadot-sdk/pull/3107 + +crates: +- name: pallet-bags-list + bump: major +- name: pallet-staking + bump: major +- name: pallet-election-provider-multi-block + bump: patch +- name: frame-election-provider-support + bump: major +- name: polkadot-node-primitives + bump: patch +- name: pallet-election-provider-multi-phase + bump: major +- name: pallet-fast-unstake + bump: patch +- name: pallet-babe + bump: patch +- name: pallet-beefy + bump: patch +- name: pallet-delegated-staking + bump: patch +- name: pallet-grandpa + bump: patch +- name: pallet-offences-benchmarking + bump: patch +- name: pallet-root-offences + bump: major +- name: pallet-session-benchmarking + bump: patch +- name: frame-support + bump: minor +- name: westend-runtime + bump: major +- name: polkadot-parachain-primitives + bump: patch +- name: polkadot-runtime-parachains + bump: minor +- name: polkadot + bump: patch +- name: pallet-session + bump: major +- name: frame-support-procedural + bump: patch +- name: sp-runtime + bump: patch +- name: polkadot-sdk-frame + bump: patch +- name: pallet-elections-phragmen + bump: patch +- name: pallet-nomination-pools-benchmarking + bump: patch +- name: sp-npos-elections + bump: major +- name: sp-staking + bump: minor +- name: polkadot-sdk + bump: minor +- name: pallet-staking-reward-fn + bump: patch +- name: pallet-staking-async + bump: patch +- name: pallet-staking-async-ah-client + bump: patch +- name: pallet-staking-async-rc-client + bump: patch +- name: pallet-staking-async-reward-fn + bump: patch +- name: pallet-staking-async-runtime-api + bump: patch +- name: pallet-staking-async-parachain-runtime + bump: patch +- name: pallet-staking-async-rc-runtime + bump: patch +- name: pallet-staking-async-rc-runtime-constants + bump: patch +- name: rococo-runtime + bump: major +- name: pallet-authority-discovery + bump: patch +- name: pallet-im-online + bump: patch +- name: pallet-collator-selection + bump: patch +- 
name: pallet-beefy-mmr + bump: patch +- name: pallet-nomination-pools + bump: patch diff --git a/substrate/bin/node/cli/src/chain_spec.rs b/substrate/bin/node/cli/src/chain_spec.rs index fe2c2b780ea6f..e4d03519a770f 100644 --- a/substrate/bin/node/cli/src/chain_spec.rs +++ b/substrate/bin/node/cli/src/chain_spec.rs @@ -314,8 +314,6 @@ pub fn testnet_genesis( let (initial_authorities, endowed_accounts, stakers) = configure_accounts(initial_authorities, initial_nominators, endowed_accounts, STASH); - let staking_playground_config = None; - // Todo: After #7748 is done, we can refactor this to avoid // calling into the native runtime. kitchensink_runtime::genesis_config_presets::kitchensink_genesis( @@ -340,7 +338,6 @@ pub fn testnet_genesis( root_key, endowed_accounts, stakers, - staking_playground_config, ) } diff --git a/substrate/bin/node/runtime/src/genesis_config_presets.rs b/substrate/bin/node/runtime/src/genesis_config_presets.rs index 25627120c8220..626efce76d666 100644 --- a/substrate/bin/node/runtime/src/genesis_config_presets.rs +++ b/substrate/bin/node/runtime/src/genesis_config_presets.rs @@ -41,11 +41,6 @@ use sp_runtime::Perbill; pub const ENDOWMENT: Balance = 10_000_000 * DOLLARS; pub const STASH: Balance = ENDOWMENT / 1000; -pub struct StakingPlaygroundConfig { - pub validator_count: u32, - pub minimum_validator_count: u32, -} - /// The staker type as supplied ot the Staking config. 
pub type Staker = (AccountId, AccountId, Balance, StakerStatus); @@ -55,15 +50,9 @@ pub fn kitchensink_genesis( root_key: AccountId, endowed_accounts: Vec, stakers: Vec, - staking_playground_config: Option, ) -> serde_json::Value { - let (validator_count, min_validator_count) = match staking_playground_config { - Some(c) => (c.validator_count, c.minimum_validator_count), - None => { - let authorities_count = initial_authorities.len() as u32; - (authorities_count, authorities_count) - }, - }; + let validator_count = initial_authorities.len() as u32; + let minimum_validator_count = validator_count; let collective = collective(&endowed_accounts); @@ -80,7 +69,7 @@ pub fn kitchensink_genesis( }, staking: StakingConfig { validator_count, - minimum_validator_count: min_validator_count, + minimum_validator_count, invulnerables: initial_authorities .iter() .map(|x| x.0.clone()) @@ -126,7 +115,6 @@ pub fn get_preset(id: &PresetId) -> Option> { alice.clone(), endowed, vec![validator(alice_stash.clone())], - None, ), sp_genesis_builder::LOCAL_TESTNET_RUNTIME_PRESET => kitchensink_genesis( vec![ @@ -138,7 +126,6 @@ pub fn get_preset(id: &PresetId) -> Option> { alice, endowed, vec![validator(alice_stash), validator(bob_stash)], - None, ), _ => return None, }; diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index 723bead6a1c1b..7f3520967c2f0 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -681,7 +681,7 @@ impl_opaque_keys! 
{ impl pallet_session::Config for Runtime { type RuntimeEvent = RuntimeEvent; type ValidatorId = ::AccountId; - type ValidatorIdOf = pallet_staking::StashOf; + type ValidatorIdOf = sp_runtime::traits::ConvertInto; type ShouldEndSession = Babe; type NextSessionRotation = Babe; type SessionManager = pallet_session::historical::NoteHistoricalRoot; @@ -693,8 +693,9 @@ impl pallet_session::Config for Runtime { } impl pallet_session::historical::Config for Runtime { - type FullIdentification = pallet_staking::Existence; - type FullIdentificationOf = pallet_staking::ExistenceOf; + type RuntimeEvent = RuntimeEvent; + type FullIdentification = (); + type FullIdentificationOf = pallet_staking::UnitIdentificationOf; } pallet_staking_reward_curve::build! { @@ -724,7 +725,7 @@ const MAX_QUOTA_NOMINATIONS: u32 = 16; pub struct StakingBenchmarkingConfig; impl pallet_staking::BenchmarkingConfig for StakingBenchmarkingConfig { - type MaxNominators = ConstU32<1000>; + type MaxNominators = ConstU32<5000>; type MaxValidators = ConstU32<1000>; } @@ -764,6 +765,7 @@ impl pallet_staking::Config for Runtime { type WeightInfo = pallet_staking::weights::SubstrateWeight; type BenchmarkingConfig = StakingBenchmarkingConfig; type Filter = Nothing; + type MaxValidatorSet = ConstU32<1000>; } impl pallet_fast_unstake::Config for Runtime { @@ -776,7 +778,6 @@ impl pallet_fast_unstake::Config for Runtime { type MaxErasToCheckPerBlock = ConstU32<1>; type WeightInfo = (); } - parameter_types! { // phase durations. 1/4 of the last session for each. 
pub const SignedPhase: u32 = EPOCH_DURATION_IN_BLOCKS / 4; @@ -867,15 +868,14 @@ impl Get> for OffchainRandomBalancing { pub struct OnChainSeqPhragmen; impl onchain::Config for OnChainSeqPhragmen { + type Sort = ConstBool; type System = Runtime; - type Solver = SequentialPhragmen< - AccountId, - pallet_election_provider_multi_phase::SolutionAccuracyOf, - >; - type DataProvider = ::DataProvider; + type Solver = SequentialPhragmen>; + type DataProvider = Staking; type WeightInfo = frame_election_provider_support::weights::SubstrateWeight; - type MaxWinners = ::MaxWinners; type Bounds = ElectionBoundsOnChain; + type MaxBackersPerWinner = MaxElectingVotersSolution; + type MaxWinnersPerPage = MaxActiveValidators; } impl pallet_election_provider_multi_phase::MinerConfig for Runtime { @@ -886,6 +886,7 @@ impl pallet_election_provider_multi_phase::MinerConfig for Runtime { type MaxVotesPerVoter = <::DataProvider as ElectionDataProvider>::MaxVotesPerVoter; type MaxWinners = MaxActiveValidators; + type MaxBackersPerWinner = MaxElectingVotersSolution; // The unsigned submissions have to respect the weight of the submit_unsigned call, thus their // weight estimate function is wired to this call's weight. @@ -927,6 +928,7 @@ impl pallet_election_provider_multi_phase::Config for Runtime { type ElectionBounds = ElectionBoundsMultiPhase; type BenchmarkingConfig = ElectionProviderBenchmarkConfig; type WeightInfo = pallet_election_provider_multi_phase::weights::SubstrateWeight; + type MaxBackersPerWinner = MaxElectingVotersSolution; } parameter_types! { @@ -1218,8 +1220,8 @@ parameter_types! { pub const DesiredMembers: u32 = 13; pub const DesiredRunnersUp: u32 = 7; pub const MaxVotesPerVoter: u32 = 16; - pub const MaxVoters: u32 = 512; - pub const MaxCandidates: u32 = 64; + pub const MaxVoters: u32 = 256; + pub const MaxCandidates: u32 = 128; pub const ElectionsPhragmenPalletId: LockIdentifier = *b"phrelect"; } @@ -1499,7 +1501,7 @@ parameter_types! 
{ pub const ImOnlineUnsignedPriority: TransactionPriority = TransactionPriority::max_value(); /// We prioritize im-online heartbeats over election solution submission. pub const StakingUnsignedPriority: TransactionPriority = TransactionPriority::max_value() / 2; - pub const MaxAuthorities: u32 = 100; + pub const MaxAuthorities: u32 = 1000; pub const MaxKeys: u32 = 10_000; pub const MaxPeerInHeartbeats: u32 = 10_000; } @@ -2766,8 +2768,6 @@ mod runtime { #[runtime::pallet_index(84)] pub type AssetsFreezer = pallet_assets_freezer::Pallet; - // Indices 85-88 (including) are reserved for multi block election pallets - #[runtime::pallet_index(89)] pub type MetaTx = pallet_meta_tx::Pallet; } @@ -3884,9 +3884,7 @@ impl_runtime_apis! { #[cfg(test)] mod tests { use super::*; - use frame_election_provider_support::NposSolution; use frame_system::offchain::CreateSignedTransaction; - use sp_runtime::UpperOf; #[test] fn validate_transaction_submitter_bounds() { @@ -3899,17 +3897,6 @@ mod tests { is_submit_signed_transaction::(); } - #[test] - fn perbill_as_onchain_accuracy() { - type OnChainAccuracy = - <::Solution as NposSolution>::Accuracy; - let maximum_chain_accuracy: Vec> = (0..MaxNominations::get()) - .map(|_| >::from(OnChainAccuracy::one().deconstruct())) - .collect(); - let _: UpperOf = - maximum_chain_accuracy.iter().fold(0, |acc, x| acc.checked_add(*x).unwrap()); - } - #[test] fn call_size() { let size = core::mem::size_of::(); diff --git a/substrate/frame/authority-discovery/src/lib.rs b/substrate/frame/authority-discovery/src/lib.rs index 6883a07ad2891..1914ae4cf9ad2 100644 --- a/substrate/frame/authority-discovery/src/lib.rs +++ b/substrate/frame/authority-discovery/src/lib.rs @@ -214,11 +214,6 @@ mod tests { type WeightInfo = (); } - impl pallet_session::historical::Config for Test { - type FullIdentification = (); - type FullIdentificationOf = (); - } - pub type BlockNumber = u64; parameter_types! 
{ diff --git a/substrate/frame/babe/Cargo.toml b/substrate/frame/babe/Cargo.toml index 8673e08472eb0..0fa83b391145c 100644 --- a/substrate/frame/babe/Cargo.toml +++ b/substrate/frame/babe/Cargo.toml @@ -40,6 +40,7 @@ pallet-offences = { workspace = true, default-features = true } pallet-staking = { workspace = true, default-features = true } pallet-staking-reward-curve = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/babe/src/mock.rs b/substrate/frame/babe/src/mock.rs index cfbbbf5dd0e18..dcfcee25e7b88 100644 --- a/substrate/frame/babe/src/mock.rs +++ b/substrate/frame/babe/src/mock.rs @@ -31,7 +31,7 @@ use pallet_session::historical as pallet_session_historical; use sp_consensus_babe::{AuthorityId, AuthorityPair, Randomness, Slot, VrfSignature}; use sp_core::{ crypto::{Pair, VrfSecret}, - U256, + ConstBool, U256, }; use sp_io; use sp_runtime::{ @@ -94,7 +94,7 @@ impl_opaque_keys! 
{ impl pallet_session::Config for Test { type RuntimeEvent = RuntimeEvent; type ValidatorId = ::AccountId; - type ValidatorIdOf = pallet_staking::StashOf; + type ValidatorIdOf = sp_runtime::traits::ConvertInto; type ShouldEndSession = Babe; type NextSessionRotation = Babe; type SessionManager = pallet_session::historical::NoteHistoricalRoot; @@ -105,8 +105,9 @@ impl pallet_session::Config for Test { } impl pallet_session::historical::Config for Test { - type FullIdentification = pallet_staking::Existence; - type FullIdentificationOf = pallet_staking::ExistenceOf; + type RuntimeEvent = RuntimeEvent; + type FullIdentification = (); + type FullIdentificationOf = pallet_staking::UnitIdentificationOf; } impl pallet_authorship::Config for Test { @@ -121,9 +122,10 @@ impl pallet_timestamp::Config for Test { type WeightInfo = (); } +type Balance = u128; #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u128; + type Balance = Balance; type ExistentialDeposit = ConstU128<1>; type AccountStore = System; } @@ -152,7 +154,9 @@ impl onchain::Config for OnChainSeqPhragmen { type Solver = SequentialPhragmen; type DataProvider = Staking; type WeightInfo = (); - type MaxWinners = ConstU32<100>; + type MaxWinnersPerPage = ConstU32<100>; + type MaxBackersPerWinner = ConstU32<100>; + type Sort = ConstBool; type Bounds = ElectionsBounds; } @@ -311,6 +315,7 @@ pub fn new_test_ext_with_pairs( } pub fn new_test_ext_raw_authorities(authorities: Vec) -> sp_io::TestExternalities { + sp_tracing::try_init_simple(); let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); let balances: Vec<_> = (0..authorities.len()).map(|i| (i as u64, 10_000_000)).collect(); diff --git a/substrate/frame/babe/src/tests.rs b/substrate/frame/babe/src/tests.rs index bfcd3540ae3ef..aae36f8608b28 100644 --- a/substrate/frame/babe/src/tests.rs +++ b/substrate/frame/babe/src/tests.rs @@ -876,12 +876,11 @@ fn 
report_equivocation_after_skipped_epochs_works() { // report the equivocation, in order for the validation to pass the mapping // between epoch index and session index must be checked. - assert!(Babe::report_equivocation_unsigned( + assert_ok!(Babe::report_equivocation_unsigned( RuntimeOrigin::none(), Box::new(equivocation_proof), key_owner_proof - ) - .is_ok()); + )); }) } diff --git a/substrate/frame/bags-list/remote-tests/src/lib.rs b/substrate/frame/bags-list/remote-tests/src/lib.rs index 1f0584fa07e5b..fbf6fe94d57fa 100644 --- a/substrate/frame/bags-list/remote-tests/src/lib.rs +++ b/substrate/frame/bags-list/remote-tests/src/lib.rs @@ -87,7 +87,8 @@ pub fn display_and_check_bags>( for id in bag.std_iter().map(|node| node.std_id().clone()) { let vote_weight = - >::ScoreProvider::score(&id); + >::ScoreProvider::score(&id) + .unwrap(); let vote_weight_thresh_u64: u64 = (*vote_weight_thresh) .try_into() .map_err(|_| "runtime must configure score to at most u64 to use this test") diff --git a/substrate/frame/bags-list/remote-tests/src/migration.rs b/substrate/frame/bags-list/remote-tests/src/migration.rs index 397ff1000eaa1..3d929ee9854b3 100644 --- a/substrate/frame/bags-list/remote-tests/src/migration.rs +++ b/substrate/frame/bags-list/remote-tests/src/migration.rs @@ -52,7 +52,7 @@ pub async fn execute( // run the actual migration let moved = ::VoterList::unsafe_regenerate( pallet_staking::Nominators::::iter().map(|(n, _)| n), - pallet_staking::Pallet::::weight_of_fn(), + Box::new(|x| Some(pallet_staking::Pallet::::weight_of(x))), ); log::info!(target: LOG_TARGET, "Moved {} nominators", moved); diff --git a/substrate/frame/bags-list/remote-tests/src/snapshot.rs b/substrate/frame/bags-list/remote-tests/src/snapshot.rs index 5f999aa0b8b75..f8ba7b8d02433 100644 --- a/substrate/frame/bags-list/remote-tests/src/snapshot.rs +++ b/substrate/frame/bags-list/remote-tests/src/snapshot.rs @@ -22,7 +22,10 @@ use frame_election_provider_support::{ }; use 
frame_support::traits::PalletInfoAccess; use remote_externalities::{Builder, Mode, OnlineConfig}; -use sp_runtime::{traits::Block as BlockT, DeserializeOwned}; +use sp_runtime::{ + traits::{Block as BlockT, Zero}, + DeserializeOwned, +}; /// Execute create a snapshot from pallet-staking. pub async fn execute(voter_limit: Option, currency_unit: u64, ws_url: String) @@ -70,8 +73,9 @@ where Some(v) => DataProviderBounds { count: Some(CountBound(v as u32)), size: None }, }; + // single page voter snapshot, thus page index == 0. let voters = - as ElectionDataProvider>::electing_voters(bounds) + as ElectionDataProvider>::electing_voters(bounds, Zero::zero()) .unwrap(); let mut voters_nominator_only = voters diff --git a/substrate/frame/bags-list/src/benchmarks.rs b/substrate/frame/bags-list/src/benchmarks.rs index 7db4c4bb359f7..cb8b590666ef6 100644 --- a/substrate/frame/bags-list/src/benchmarks.rs +++ b/substrate/frame/bags-list/src/benchmarks.rs @@ -58,7 +58,7 @@ benchmarks_instance_pallet! { ] ); }: { - let voters = List::::iter(); + let voters = as SortedListProvider>::iter(); let len = voters.collect::>().len(); assert!(len as u32 == n, "len is {}, expected {}", len, n); } @@ -93,11 +93,51 @@ benchmarks_instance_pallet! { ); }: { // this should only go into one of the bags - let voters = List::::iter().take(n as usize / 4 ); + let voters = as SortedListProvider>::iter().take(n as usize / 4 ); let len = voters.collect::>().len(); assert!(len as u32 == n / 4, "len is {}, expected {}", len, n / 4); } + #[extra] + iter_next { + let n = 100; + + // clear any pre-existing storage. + List::::unsafe_clear(); + + // add n nodes, half to first bag and half to second bag. 
+ let bag_thresh = T::BagThresholds::get()[0]; + let second_bag_thresh = T::BagThresholds::get()[1]; + + + for i in 0..n/2 { + let node: T::AccountId = account("node", i, 0); + assert_ok!(List::::insert(node.clone(), bag_thresh - One::one())); + } + for i in 0..n/2 { + let node: T::AccountId = account("node", i, 1); + assert_ok!(List::::insert(node.clone(), bag_thresh + One::one())); + } + assert_eq!( + List::::get_bags().into_iter().map(|(bag, nodes)| (bag, nodes.len())).collect::>(), + vec![ + (bag_thresh, (n / 2) as usize), + (second_bag_thresh, (n / 2) as usize), + ] + ); + }: { + // this should only go into one of the bags + let mut iter_var = as SortedListProvider>::iter(); + let mut voters = Vec::::with_capacity((n/4) as usize); + for _ in 0..(n/4) { + let next = iter_var.next().unwrap(); + voters.push(next); + } + + let len = voters.len(); + assert!(len as u32 == n / 4, "len is {}, expected {}", len, n / 4); + } + #[extra] iter_from { let n = 100; @@ -142,7 +182,7 @@ benchmarks_instance_pallet! { // iter from someone in the 3rd bag, so this should touch ~75 nodes and 3 bags let from: T::AccountId = account("node", 0, 2); }: { - let voters = List::::iter_from(&from).unwrap(); + let voters = as SortedListProvider>::iter_from(&from).unwrap(); let len = voters.collect::>().len(); assert!(len as u32 == 74, "len is {}, expected {}", len, 74); } diff --git a/substrate/frame/bags-list/src/lib.rs b/substrate/frame/bags-list/src/lib.rs index 1ebeb26b3fe74..746d798a0cbe5 100644 --- a/substrate/frame/bags-list/src/lib.rs +++ b/substrate/frame/bags-list/src/lib.rs @@ -264,6 +264,13 @@ pub mod pallet { pub type ListBags, I: 'static = ()> = StorageMap<_, Twox64Concat, T::Score, list::Bag>; + /// Lock all updates to this pallet. + /// + /// If any nodes needs updating, removal or addition due to a temporary lock, the + /// [`Call::rebag`] can be used. 
+ #[pallet::storage] + pub type Lock, I: 'static = ()> = StorageValue<_, (), OptionQuery>; + #[pallet::event] #[pallet::generate_deposit(pub(crate) fn deposit_event)] pub enum Event, I: 'static = ()> { @@ -277,6 +284,8 @@ pub mod pallet { pub enum Error { /// A error in the list interface implementation. List(ListError), + /// Could not update a node, because the pallet is locked. + Locked, } impl From for Error { @@ -302,9 +311,29 @@ pub mod pallet { pub fn rebag(origin: OriginFor, dislocated: AccountIdLookupOf) -> DispatchResult { ensure_signed(origin)?; let dislocated = T::Lookup::lookup(dislocated)?; - let current_score = T::ScoreProvider::score(&dislocated); - Pallet::::do_rebag(&dislocated, current_score) - .map_err::, _>(Into::into)?; + Self::ensure_unlocked().map_err(|_| Error::::Locked)?; + + let existed = ListNodes::::contains_key(&dislocated); + match (existed, T::ScoreProvider::score(&dislocated)) { + (true, Some(current_score)) => { + // existed and score is updated, maybe rebag. + let _ = Pallet::::do_rebag(&dislocated, current_score) + .map_err::, _>(Into::into)?; + }, + (false, Some(current_score)) => { + // did not exist, and has a score now, insert! + Self::on_insert(dislocated.clone(), current_score) + .map_err::, _>(Into::into)?; + }, + (true, None) => { + // existed, but has no new score now, remove! + Self::on_remove(&dislocated).map_err::, _>(Into::into)?; + }, + (false, None) => { + // did not exist, and has no score now, error. 
+ return Err(Error::::List(ListError::NodeNotFound).into()); + }, + } Ok(()) } @@ -326,6 +355,7 @@ pub mod pallet { ) -> DispatchResult { let heavier = ensure_signed(origin)?; let lighter = T::Lookup::lookup(lighter)?; + Self::ensure_unlocked().map_err(|_| Error::::Locked)?; List::::put_in_front_of(&lighter, &heavier) .map_err::, _>(Into::into) .map_err::(Into::into) @@ -344,6 +374,7 @@ pub mod pallet { ensure_signed(origin)?; let lighter = T::Lookup::lookup(lighter)?; let heavier = T::Lookup::lookup(heavier)?; + Self::ensure_unlocked().map_err(|_| Error::::Locked)?; List::::put_in_front_of(&lighter, &heavier) .map_err::, _>(Into::into) .map_err::(Into::into) @@ -392,6 +423,13 @@ impl, I: 'static> Pallet { Ok(maybe_movement) } + fn ensure_unlocked() -> Result<(), ListError> { + match Lock::::get() { + None => Ok(()), + Some(()) => Err(ListError::Locked), + } + } + /// Equivalent to `ListBags::get`, but public. Useful for tests in outside of this crate. #[cfg(feature = "std")] pub fn list_bags_get(score: T::Score) -> Option> { @@ -407,6 +445,14 @@ impl, I: 'static> SortedListProvider for Pallet Box::new(List::::iter().map(|n| n.id().clone())) } + fn range() -> (Self::Score, Self::Score) { + use frame_support::traits::Get; + ( + T::BagThresholds::get().first().cloned().unwrap_or_default(), + T::BagThresholds::get().last().cloned().unwrap_or_default(), + ) + } + fn iter_from( start: &T::AccountId, ) -> Result>, Self::Error> { @@ -418,29 +464,40 @@ impl, I: 'static> SortedListProvider for Pallet ListNodes::::count() } + fn lock() { + Lock::::put(()) + } + + fn unlock() { + Lock::::kill() + } + fn contains(id: &T::AccountId) -> bool { List::::contains(id) } fn on_insert(id: T::AccountId, score: T::Score) -> Result<(), ListError> { + Pallet::::ensure_unlocked()?; List::::insert(id, score) } - fn get_score(id: &T::AccountId) -> Result { - List::::get_score(id) - } - fn on_update(id: &T::AccountId, new_score: T::Score) -> Result<(), ListError> { + 
Pallet::::ensure_unlocked()?; Pallet::::do_rebag(id, new_score).map(|_| ()) } + fn get_score(id: &T::AccountId) -> Result { + List::::get_score(id) + } + fn on_remove(id: &T::AccountId) -> Result<(), ListError> { + Pallet::::ensure_unlocked()?; List::::remove(id) } fn unsafe_regenerate( all: impl IntoIterator, - score_of: Box T::Score>, + score_of: Box Option>, ) -> u32 { // NOTE: This call is unsafe for the same reason as SortedListProvider::unsafe_regenerate. // I.e. because it can lead to many storage accesses. @@ -487,8 +544,8 @@ impl, I: 'static> SortedListProvider for Pallet impl, I: 'static> ScoreProvider for Pallet { type Score = as SortedListProvider>::Score; - fn score(id: &T::AccountId) -> T::Score { - Node::::get(id).map(|node| node.score()).unwrap_or_default() + fn score(id: &T::AccountId) -> Option { + Node::::get(id).map(|node| node.score()) } frame_election_provider_support::runtime_benchmarks_or_std_enabled! { diff --git a/substrate/frame/bags-list/src/list/mod.rs b/substrate/frame/bags-list/src/list/mod.rs index 5732e80e6bfb1..e32908b0630ac 100644 --- a/substrate/frame/bags-list/src/list/mod.rs +++ b/substrate/frame/bags-list/src/list/mod.rs @@ -71,6 +71,8 @@ pub enum ListError { NotInSameBag, /// Given node id was not found. NodeNotFound, + /// The List is locked, therefore updates cannot happen now. + Locked, } #[cfg(test)] @@ -130,7 +132,7 @@ impl, I: 'static> List { /// Returns the number of ids migrated. pub fn unsafe_regenerate( all: impl IntoIterator, - score_of: Box T::Score>, + score_of: Box Option>, ) -> u32 { // NOTE: This call is unsafe for the same reason as SortedListProvider::unsafe_regenerate. // I.e. because it can lead to many storage accesses. @@ -318,13 +320,16 @@ impl, I: 'static> List { /// Returns the final count of number of ids inserted. 
fn insert_many( ids: impl IntoIterator, - score_of: impl Fn(&T::AccountId) -> T::Score, + score_of: impl Fn(&T::AccountId) -> Option, ) -> u32 { let mut count = 0; ids.into_iter().for_each(|v| { - let score = score_of(&v); - if Self::insert(v, score).is_ok() { - count += 1; + if let Some(score) = score_of(&v) { + if Self::insert(v, score).is_ok() { + count += 1; + } + } else { + // nada } }); diff --git a/substrate/frame/bags-list/src/migrations.rs b/substrate/frame/bags-list/src/migrations.rs index bfc70fef370a5..8bffbb4a79661 100644 --- a/substrate/frame/bags-list/src/migrations.rs +++ b/substrate/frame/bags-list/src/migrations.rs @@ -111,7 +111,7 @@ impl, I: 'static> OnRuntimeUpgrade for AddScore { prev: node.prev, next: node.next, bag_upper: node.bag_upper, - score, + score: score.unwrap_or_default(), _phantom: node._phantom, }; diff --git a/substrate/frame/bags-list/src/mock.rs b/substrate/frame/bags-list/src/mock.rs index 3690a876f62d8..b33fc940a4073 100644 --- a/substrate/frame/bags-list/src/mock.rs +++ b/substrate/frame/bags-list/src/mock.rs @@ -29,7 +29,6 @@ pub type Balance = u32; parameter_types! { // Set the vote weight for any id who's weight has _not_ been set with `set_score_of`. - pub static NextVoteWeight: VoteWeight = 0; pub static NextVoteWeightMap: HashMap = Default::default(); } @@ -37,8 +36,8 @@ pub struct StakingMock; impl frame_election_provider_support::ScoreProvider for StakingMock { type Score = VoteWeight; - fn score(id: &AccountId) -> Self::Score { - *NextVoteWeightMap::get().get(id).unwrap_or(&NextVoteWeight::get()) + fn score(id: &AccountId) -> Option { + NextVoteWeightMap::get().get(id).cloned() } frame_election_provider_support::runtime_benchmarks_or_std_enabled! 
{ diff --git a/substrate/frame/bags-list/src/tests.rs b/substrate/frame/bags-list/src/tests.rs index 0b382a4fcefa9..b959be745159b 100644 --- a/substrate/frame/bags-list/src/tests.rs +++ b/substrate/frame/bags-list/src/tests.rs @@ -138,6 +138,39 @@ mod pallet { }); } + #[test] + fn rebag_when_missing() { + ExtBuilder::default().build_and_execute(|| { + // given + assert_eq!(List::::get_bags(), vec![(10, vec![1]), (1_000, vec![2, 3, 4])]); + + // when + NEXT_VOTE_WEIGHT_MAP.with(|m| m.borrow_mut().remove(&3)); + + // then + assert_ok!(BagsList::rebag(RuntimeOrigin::signed(0), 3)); + + assert_eq!(List::::get_bags(), vec![(10, vec![1]), (1_000, vec![2, 4])]); + }); + } + + #[test] + fn rebag_when_added() { + ExtBuilder::default().build_and_execute(|| { + // given + assert_eq!(List::::get_bags(), vec![(10, vec![1]), (1_000, vec![2, 3, 4])]); + + // when 5 is added, but somehow it is not present in the bags list. + NEXT_VOTE_WEIGHT_MAP.with(|m| m.borrow_mut().insert(5, 10)); + + // then + assert_ok!(BagsList::rebag(RuntimeOrigin::signed(0), 5)); + + // 5 is added + assert_eq!(List::::get_bags(), vec![(10, vec![1, 5]), (1_000, vec![2, 3, 4])]); + }); + } + // Rebagging the tail of a bag results in the old bag having a new tail and an overall correct // state. 
#[test] @@ -210,8 +243,9 @@ mod pallet { fn wrong_rebag_errs() { ExtBuilder::default().build_and_execute(|| { let node_3 = list::Node::::get(&3).unwrap(); + + NEXT_VOTE_WEIGHT_MAP.with(|m| m.borrow_mut().insert(500, 500)); // when account 3 is _not_ misplaced with score 500 - NextVoteWeight::set(500); assert!(!node_3.is_misplaced(500)); // then calling rebag on account 3 with score 500 is a noop @@ -736,3 +770,51 @@ mod sorted_list_provider { }) } } + +pub mod lock { + use super::*; + + #[test] + fn lock_prevents_list_update() { + ExtBuilder::default().build_and_execute(|| { + // given + assert_eq!(List::::get_bags(), vec![(10, vec![1]), (1_000, vec![2, 3, 4])]); + + // when + BagsList::lock(); + + assert_noop!(BagsList::on_update(&3, 2_000), ListError::Locked); + assert_noop!(BagsList::on_increase(&3, 2_000), ListError::Locked); + assert_noop!(BagsList::on_decrease(&3, 2_000), ListError::Locked); + assert_noop!(BagsList::on_remove(&3), ListError::Locked); + + // when + BagsList::unlock(); + + // then + assert_ok!(BagsList::on_remove(&3)); + }) + } + + #[test] + fn lock_prevents_calls() { + ExtBuilder::default().build_and_execute(|| { + // given + assert_eq!(List::::get_bags(), vec![(10, vec![1]), (1_000, vec![2, 3, 4])]); + + // when + BagsList::lock(); + + // then + assert_noop!(BagsList::rebag(RuntimeOrigin::signed(0), 3), Error::::Locked); + assert_noop!( + BagsList::put_in_front_of(RuntimeOrigin::signed(3), 4), + Error::::Locked + ); + assert_noop!( + BagsList::put_in_front_of_other(RuntimeOrigin::signed(0), 3u64, 4), + Error::::Locked + ); + }) + } +} diff --git a/substrate/frame/beefy-mmr/src/tests.rs b/substrate/frame/beefy-mmr/src/tests.rs index 297fb28647ac3..fb61dc1879415 100644 --- a/substrate/frame/beefy-mmr/src/tests.rs +++ b/substrate/frame/beefy-mmr/src/tests.rs @@ -214,7 +214,7 @@ fn extract_validation_context_should_work_correctly() { // Check the MMR root log let expected_mmr_root: [u8; 32] = array_bytes::hex_n_into_unchecked( - 
"b2106eff9894288bc212b3a9389caa54efd37962c3a7b71b3b0b06a0911b88a5", + "d4f38bcfa95e1f03a06f7545aa95f24f5e10cc0bbd54cf97fbbff66d5be4769f", ); assert_eq!( System::digest().logs, diff --git a/substrate/frame/beefy/Cargo.toml b/substrate/frame/beefy/Cargo.toml index b8e952dfbd66d..1cb4c41f41b09 100644 --- a/substrate/frame/beefy/Cargo.toml +++ b/substrate/frame/beefy/Cargo.toml @@ -36,6 +36,7 @@ sp-core = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } sp-staking = { workspace = true, default-features = true } sp-state-machine = { workspace = true } +sp-tracing = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/beefy/src/mock.rs b/substrate/frame/beefy/src/mock.rs index 5df910541b301..b069a62b1180e 100644 --- a/substrate/frame/beefy/src/mock.rs +++ b/substrate/frame/beefy/src/mock.rs @@ -16,9 +16,6 @@ // limitations under the License. use codec::{Decode, DecodeWithMemTracking, Encode}; -use scale_info::TypeInfo; -use std::vec; - use frame_election_provider_support::{ bounds::{ElectionBounds, ElectionBoundsBuilder}, onchain, SequentialPhragmen, Weight, @@ -29,7 +26,8 @@ use frame_support::{ }; use frame_system::pallet_prelude::HeaderFor; use pallet_session::historical as pallet_session_historical; -use sp_core::{crypto::KeyTypeId, ConstU128}; +use scale_info::TypeInfo; +use sp_core::{crypto::KeyTypeId, ConstBool, ConstU128}; use sp_runtime::{ app_crypto::ecdsa::Public, curve::PiecewiseLinear, @@ -180,7 +178,7 @@ parameter_types! 
{ impl pallet_session::Config for Test { type RuntimeEvent = RuntimeEvent; type ValidatorId = u64; - type ValidatorIdOf = pallet_staking::StashOf; + type ValidatorIdOf = sp_runtime::traits::ConvertInto; type ShouldEndSession = pallet_session::PeriodicSessions, ConstU64<0>>; type NextSessionRotation = pallet_session::PeriodicSessions, ConstU64<0>>; type SessionManager = pallet_session::historical::NoteHistoricalRoot; @@ -191,8 +189,9 @@ impl pallet_session::Config for Test { } impl pallet_session::historical::Config for Test { - type FullIdentification = pallet_staking::Existence; - type FullIdentificationOf = pallet_staking::ExistenceOf; + type RuntimeEvent = RuntimeEvent; + type FullIdentification = (); + type FullIdentificationOf = pallet_staking::UnitIdentificationOf; } impl pallet_authorship::Config for Test { @@ -200,9 +199,10 @@ impl pallet_authorship::Config for Test { type EventHandler = (); } +type Balance = u128; #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u128; + type Balance = Balance; type ExistentialDeposit = ConstU128<1>; type AccountStore = System; } @@ -238,7 +238,9 @@ impl onchain::Config for OnChainSeqPhragmen { type Solver = SequentialPhragmen; type DataProvider = Staking; type WeightInfo = (); - type MaxWinners = ConstU32<100>; + type MaxWinnersPerPage = ConstU32<100>; + type MaxBackersPerWinner = ConstU32<100>; + type Sort = ConstBool; type Bounds = ElectionsBoundsOnChain; } @@ -278,6 +280,7 @@ impl ExtBuilder { } pub fn build(self) -> sp_io::TestExternalities { + sp_tracing::try_init_simple(); let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); let balances: Vec<_> = diff --git a/substrate/frame/delegated-staking/src/mock.rs b/substrate/frame/delegated-staking/src/mock.rs index a944d3808a237..0bc60b039741f 100644 --- a/substrate/frame/delegated-staking/src/mock.rs +++ b/substrate/frame/delegated-staking/src/mock.rs @@ -32,7 +32,7 @@ use 
frame_election_provider_support::{ }; use frame_support::dispatch::RawOrigin; use pallet_staking::{ActiveEra, ActiveEraInfo, CurrentEra}; -use sp_core::U256; +use sp_core::{ConstBool, U256}; use sp_runtime::traits::Convert; use sp_staking::{Agent, Stake, StakingInterface}; @@ -96,7 +96,9 @@ impl onchain::Config for OnChainSeqPhragmen { type Solver = SequentialPhragmen; type DataProvider = Staking; type WeightInfo = (); - type MaxWinners = ConstU32<100>; + type MaxWinnersPerPage = ConstU32<100>; + type MaxBackersPerWinner = ConstU32<100>; + type Sort = ConstBool; type Bounds = ElectionsBoundsOnChain; } diff --git a/substrate/frame/election-provider-multi-block/Cargo.toml b/substrate/frame/election-provider-multi-block/Cargo.toml new file mode 100644 index 0000000000000..907523d288305 --- /dev/null +++ b/substrate/frame/election-provider-multi-block/Cargo.toml @@ -0,0 +1,84 @@ +[package] +name = "pallet-election-provider-multi-block" +version = "0.9.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true +description = "PALLET multi phase+block election providers" + +[lints] +workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { features = [ + "derive", +], workspace = true } +log = { workspace = true } +scale-info = { features = [ + "derive", +], workspace = true } + +frame-election-provider-support = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } + +sp-arithmetic = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-npos-elections = { workspace = true } +sp-runtime = { workspace = true } +sp-std = { workspace = true } + +# Optional imports for benchmarking +frame-benchmarking = { optional = true, workspace = true } +rand = { features = ["alloc", "small_rng"], optional = true, workspace = true } + +[dev-dependencies] +frame-benchmarking = { 
workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +parking_lot = { workspace = true, default-features = true } +sp-core = { workspace = true } +sp-io = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-benchmarking?/std", + "frame-election-provider-support/std", + "frame-support/std", + "frame-system/std", + "log/std", + "pallet-balances/std", + "rand/std", + "scale-info/std", + "sp-arithmetic/std", + "sp-core/std", + "sp-io/std", + "sp-npos-elections/std", + "sp-runtime/std", + "sp-std/std", + "sp-tracing/std", +] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-election-provider-support/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "rand", + "sp-runtime/runtime-benchmarks", +] +try-runtime = [ + "frame-election-provider-support/try-runtime", + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-balances/try-runtime", + "sp-runtime/try-runtime", +] diff --git a/substrate/frame/election-provider-multi-block/src/benchmarking.rs b/substrate/frame/election-provider-multi-block/src/benchmarking.rs new file mode 100644 index 0000000000000..efccf36e9f60e --- /dev/null +++ b/substrate/frame/election-provider-multi-block/src/benchmarking.rs @@ -0,0 +1,266 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{ + verifier::{self, Verifier}, + Config, CurrentPhase, Pallet, Phase, Snapshot, +}; +use frame_benchmarking::v2::*; +use frame_election_provider_support::{ElectionDataProvider, ElectionProvider}; +use frame_support::pallet_prelude::*; + +const SNAPSHOT_NOT_BIG_ENOUGH: &'static str = "Snapshot page is not full, you should run this \ +benchmark with enough genesis stakers in staking (DataProvider) to fill a page of voters/targets \ +as per VoterSnapshotPerBlock and TargetSnapshotPerBlock. Generate at least \ +2 * VoterSnapshotPerBlock) nominators and TargetSnapshotPerBlock validators"; + +// TODO: remove unwraps from all benchmarks of this pallet -- it makes debugging via wasm harder + +#[benchmarks(where T: crate::signed::Config + crate::unsigned::Config + crate::verifier::Config)] +mod benchmarks { + use super::*; + + #[benchmark(pov_mode = Measured)] + fn on_initialize_nothing() -> Result<(), BenchmarkError> { + assert_eq!(CurrentPhase::::get(), Phase::Off); + + #[block] + { + Pallet::::roll_next(true, false); + } + + assert_eq!(CurrentPhase::::get(), Phase::Off); + Ok(()) + } + + #[benchmark(pov_mode = Measured)] + fn on_initialize_into_snapshot_msp() -> Result<(), BenchmarkError> { + assert!(T::Pages::get() >= 2, "this benchmark only works in a runtime with 2 pages or more, set at least `type Pages = 2` for benchmark run"); + + #[cfg(test)] + crate::mock::ElectionStart::set(sp_runtime::traits::Bounded::max_value()); + crate::Pallet::::start().unwrap(); + + assert_eq!(CurrentPhase::::get(), Phase::Snapshot(T::Pages::get())); + + 
#[block] + { + Pallet::::roll_next(true, false); + } + + // we have collected the target snapshot only + assert_eq!(CurrentPhase::::get(), Phase::Snapshot(T::Pages::get() - 1)); + assert_eq!( + Snapshot::::targets_decode_len().unwrap() as u32, + T::TargetSnapshotPerBlock::get(), + "{}", + SNAPSHOT_NOT_BIG_ENOUGH + ); + assert_eq!(Snapshot::::voters_decode_len(T::Pages::get() - 1), None); + + Ok(()) + } + + #[benchmark(pov_mode = Measured)] + fn on_initialize_into_snapshot_rest() -> Result<(), BenchmarkError> { + assert!(T::Pages::get() >= 2, "this benchmark only works in a runtime with 2 pages or more, set at least `type Pages = 2` for benchmark run"); + + #[cfg(test)] + crate::mock::ElectionStart::set(sp_runtime::traits::Bounded::max_value()); + crate::Pallet::::start().unwrap(); + + // roll to the first block of the snapshot. + Pallet::::roll_until_matches(|| { + CurrentPhase::::get() == Phase::Snapshot(T::Pages::get() - 1) + }); + + // we have collected the target snapshot only + assert_eq!( + Snapshot::::targets_decode_len().unwrap() as u32, + T::TargetSnapshotPerBlock::get() + ); + // and no voters yet. + assert_eq!(Snapshot::::voters_decode_len(T::Pages::get() - 1), None); + + // take one more snapshot page. + #[block] + { + Pallet::::roll_next(true, false); + } + + // we have now collected the first page of voters. 
+ assert_eq!(CurrentPhase::::get(), Phase::Snapshot(T::Pages::get() - 2)); + // it must be full + assert_eq!( + Snapshot::::voters_decode_len(T::Pages::get() - 1).unwrap() as u32, + T::VoterSnapshotPerBlock::get(), + "{}", + SNAPSHOT_NOT_BIG_ENOUGH + ); + Ok(()) + } + + #[benchmark(pov_mode = Measured)] + fn on_initialize_into_signed() -> Result<(), BenchmarkError> { + #[cfg(test)] + crate::mock::ElectionStart::set(sp_runtime::traits::Bounded::max_value()); + crate::Pallet::::start().unwrap(); + + Pallet::::roll_until_before_matches(|| { + matches!(CurrentPhase::::get(), Phase::Signed(_)) + }); + + assert_eq!(CurrentPhase::::get(), Phase::Snapshot(0)); + + #[block] + { + Pallet::::roll_next(true, false); + } + + assert!(CurrentPhase::::get().is_signed()); + + Ok(()) + } + + #[benchmark(pov_mode = Measured)] + fn on_initialize_into_signed_validation() -> Result<(), BenchmarkError> { + #[cfg(test)] + crate::mock::ElectionStart::set(sp_runtime::traits::Bounded::max_value()); + crate::Pallet::::start().unwrap(); + + Pallet::::roll_until_before_matches(|| { + matches!(CurrentPhase::::get(), Phase::SignedValidation(_)) + }); + + assert!(CurrentPhase::::get().is_signed()); + + #[block] + { + Pallet::::roll_next(true, false); + } + + Ok(()) + } + + #[benchmark(pov_mode = Measured)] + fn on_initialize_into_unsigned() -> Result<(), BenchmarkError> { + #[cfg(test)] + crate::mock::ElectionStart::set(sp_runtime::traits::Bounded::max_value()); + crate::Pallet::::start().unwrap(); + + Pallet::::roll_until_before_matches(|| { + matches!(CurrentPhase::::get(), Phase::Unsigned(_)) + }); + assert!(matches!(CurrentPhase::::get(), Phase::SignedValidation(_))); + + #[block] + { + Pallet::::roll_next(true, false); + } + + assert!(matches!(CurrentPhase::::get(), Phase::Unsigned(_))); + Ok(()) + } + + #[benchmark(pov_mode = Measured)] + fn export_non_terminal() -> Result<(), BenchmarkError> { + #[cfg(test)] + crate::mock::ElectionStart::set(sp_runtime::traits::Bounded::max_value()); + 
crate::Pallet::::start().unwrap(); + + // submit a full solution. + crate::Pallet::::roll_to_signed_and_submit_full_solution()?; + + // fully verify it in the signed validation phase. + assert!(T::Verifier::queued_score().is_none()); + crate::Pallet::::roll_until_matches(|| { + matches!(CurrentPhase::::get(), Phase::Unsigned(_)) + }); + + // full solution is queued. + assert!(T::Verifier::queued_score().is_some()); + assert_eq!(verifier::QueuedSolution::::valid_iter().count() as u32, T::Pages::get()); + + #[block] + { + // tell the data provider to do its election process for one page, while we are fully + // ready. + T::DataProvider::fetch_page(T::Pages::get() - 1) + } + + // we should be in the export phase now. + assert_eq!(CurrentPhase::::get(), Phase::Export(T::Pages::get() - 1)); + + Ok(()) + } + + #[benchmark(pov_mode = Measured)] + fn export_terminal() -> Result<(), BenchmarkError> { + #[cfg(test)] + crate::mock::ElectionStart::set(sp_runtime::traits::Bounded::max_value()); + crate::Pallet::::start().unwrap(); + + // submit a full solution. + crate::Pallet::::roll_to_signed_and_submit_full_solution()?; + + // fully verify it in the signed validation phase. + ensure!(T::Verifier::queued_score().is_none(), "nothing should be queued"); + crate::Pallet::::roll_until_matches(|| { + matches!(CurrentPhase::::get(), Phase::Unsigned(_)) + }); + + // full solution is queued. + ensure!(T::Verifier::queued_score().is_some(), "something should be queued"); + ensure!( + verifier::QueuedSolution::::valid_iter().count() as u32 == T::Pages::get(), + "solution should be full" + ); + + // fetch all pages, except for the last one. + for i in 1..T::Pages::get() { + T::DataProvider::fetch_page(T::Pages::get() - i) + } + + assert_eq!(CurrentPhase::::get(), Phase::Export(1)); + + #[block] + { + // tell the data provider to do its election process for one page, while we are fully + // ready. + T::DataProvider::fetch_page(0) + } + + // we should be in the off phase now. 
+ assert_eq!(CurrentPhase::::get(), Phase::Off); + + Ok(()) + } + + #[benchmark(pov_mode = Measured)] + fn manage() -> Result<(), BenchmarkError> { + // TODO + #[block] + {} + Ok(()) + } + + impl_benchmark_test_suite!( + Pallet, + crate::mock::ExtBuilder::full().build_unchecked(), + crate::mock::Runtime + ); +} diff --git a/substrate/frame/election-provider-multi-block/src/helpers.rs b/substrate/frame/election-provider-multi-block/src/helpers.rs new file mode 100644 index 0000000000000..20396ac97d224 --- /dev/null +++ b/substrate/frame/election-provider-multi-block/src/helpers.rs @@ -0,0 +1,227 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Some helper functions/macros for this crate. + +use crate::{ + types::{PageIndex, VoterOf}, + unsigned::miner::MinerConfig, + AllVoterPagesOf, SolutionTargetIndexOf, SolutionVoterIndexOf, VoteWeight, +}; +use frame_support::{traits::Get, BoundedVec}; +use sp_runtime::SaturatedConversion; +use sp_std::{collections::btree_map::BTreeMap, convert::TryInto, prelude::*}; + +/// Emit a log specific to this pallet, setting the target to [`crate::LOG_PREFIX`] +#[macro_export] +macro_rules! log { + ($level:tt, $pattern:expr $(, $values:expr)* $(,)?) 
=> { + log::$level!( + target: $crate::LOG_PREFIX, + concat!("[#{:?}] 🗳🗳🗳 ", $pattern), >::block_number() $(, $values)* + ) + }; +} + +/// Emit a log within a submodule of the pallet +#[macro_export] +macro_rules! sublog { + ($level:tt, $sub_pallet:tt, $pattern:expr $(, $values:expr)* $(,)?) => { + #[cfg(not(feature = "std"))] + log!($level, $pattern $(, $values )*); + #[cfg(feature = "std")] + log::$level!( + target: format!("{}::{}", $crate::LOG_PREFIX, $sub_pallet).as_ref(), + concat!("[#{:?}] 🗳🗳🗳 ", $pattern), >::block_number() $(, $values )* + ) + }; +} + +/// Emit a log from within the offchain miner. +#[macro_export] +macro_rules! miner_log { + ($level:tt, $pattern:expr $(, $values:expr)* $(,)?) => { + log::$level!( + target: $crate::LOG_PREFIX, + concat!("[⛏️miner] 🗳🗳🗳 ", $pattern) $(, $values)* + ) + }; +} + +/// Generate an `efficient closure of voters and the page in which they live in. +pub(crate) fn generate_voter_page_fn( + paged_snapshot: &AllVoterPagesOf, +) -> impl Fn(&T::AccountId) -> Option { + let mut cache: BTreeMap = BTreeMap::new(); + paged_snapshot + .iter() + .enumerate() + .map(|(page, whatever)| (page.saturated_into::(), whatever)) + .for_each(|(page, page_voters)| { + page_voters.iter().for_each(|(v, _, _)| { + let _existed = cache.insert(v.clone(), page); + // if a duplicate exists, we only consider the last one. Defensive only, should + // never happen. + debug_assert!(_existed.is_none()); + }); + }); + move |who| cache.get(who).copied() +} + +/// Generate a btree-map cache of the voters and their indices within the provided `snapshot`. +/// +/// This does not care about pagination. `snapshot` might be a single page or the entire blob of +/// voters. +/// +/// This can be used to efficiently build index getter closures. 
+pub(crate) fn generate_voter_cache>( + snapshot: &BoundedVec, AnyBound>, +) -> BTreeMap { + let mut cache: BTreeMap = BTreeMap::new(); + snapshot.iter().enumerate().for_each(|(i, (x, _, _))| { + let _existed = cache.insert(x.clone(), i); + // if a duplicate exists, we only consider the last one. Defensive only, should never + // happen. + debug_assert!(_existed.is_none()); + }); + + cache +} + +/// Create a function that returns the index of a voter in the snapshot. +/// +/// Same as [`voter_index_fn`] but the returned function owns all its necessary data; nothing is +/// borrowed. +pub(crate) fn voter_index_fn_owned( + cache: BTreeMap, +) -> impl Fn(&T::AccountId) -> Option> { + move |who| { + cache + .get(who) + .and_then(|i| >>::try_into(*i).ok()) + } +} + +/// Same as [`voter_index_fn`], but the returning index is converted into usize, if possible. +/// +/// ## Warning +/// +/// Note that this will represent the snapshot data from which the `cache` is generated. +pub(crate) fn voter_index_fn_usize( + cache: &BTreeMap, +) -> impl Fn(&T::AccountId) -> Option + '_ { + move |who| cache.get(who).cloned() +} + +/// A non-optimized, linear version of [`voter_index_fn`] that does not need a cache and does a +/// linear search. +/// +/// ## Warning +/// +/// Not meant to be used in production. +#[cfg(test)] +pub(crate) fn voter_index_fn_linear( + snapshot: &Vec>, +) -> impl Fn(&T::AccountId) -> Option> + '_ { + move |who| { + snapshot + .iter() + .position(|(x, _, _)| x == who) + .and_then(|i| >>::try_into(i).ok()) + } +} + +/// Create a function that returns the index of a target in the snapshot. +/// +/// The returned index type is the same as the one defined in `T::Solution::Target`. +/// +/// Note: to the extent possible, the returned function should be cached and reused. Producing that +/// function requires a `O(n log n)` data transform. Each invocation of that function completes +/// in `O(log n)`. 
+pub(crate) fn target_index_fn( + snapshot: &Vec, +) -> impl Fn(&T::AccountId) -> Option> + '_ { + let cache: BTreeMap<_, _> = + snapshot.iter().enumerate().map(|(idx, account_id)| (account_id, idx)).collect(); + move |who| { + cache + .get(who) + .and_then(|i| >>::try_into(*i).ok()) + } +} + +/// Create a function the returns the index to a target in the snapshot. +/// +/// The returned index type is the same as the one defined in `T::Solution::Target`. +/// +/// ## Warning +/// +/// Not meant to be used in production. +#[cfg(test)] +pub(crate) fn target_index_fn_linear( + snapshot: &Vec, +) -> impl Fn(&T::AccountId) -> Option> + '_ { + move |who| { + snapshot + .iter() + .position(|x| x == who) + .and_then(|i| >>::try_into(i).ok()) + } +} + +/// Create a function that can map a voter index ([`SolutionVoterIndexOf`]) to the actual voter +/// account using a linearly indexible snapshot. +pub(crate) fn voter_at_fn( + snapshot: &Vec>, +) -> impl Fn(SolutionVoterIndexOf) -> Option + '_ { + move |i| { + as TryInto>::try_into(i) + .ok() + .and_then(|i| snapshot.get(i).map(|(x, _, _)| x).cloned()) + } +} + +/// Create a function that can map a target index ([`SolutionTargetIndexOf`]) to the actual target +/// account using a linearly indexible snapshot. +pub(crate) fn target_at_fn( + snapshot: &Vec, +) -> impl Fn(SolutionTargetIndexOf) -> Option + '_ { + move |i| { + as TryInto>::try_into(i) + .ok() + .and_then(|i| snapshot.get(i).cloned()) + } +} + +/// Create a function to get the stake of a voter. +/// +/// ## Warning +/// +/// The cache need must be derived from the same snapshot. Zero is returned if a voter is +/// non-existent. 
+pub(crate) fn stake_of_fn<'a, T: MinerConfig, AnyBound: Get>( + snapshot: &'a BoundedVec, AnyBound>, + cache: &'a BTreeMap, +) -> impl Fn(&T::AccountId) -> VoteWeight + 'a { + move |who| { + if let Some(index) = cache.get(who) { + snapshot.get(*index).map(|(_, x, _)| x).cloned().unwrap_or_default() + } else { + 0 + } + } +} diff --git a/substrate/frame/election-provider-multi-block/src/lib.rs b/substrate/frame/election-provider-multi-block/src/lib.rs new file mode 100644 index 0000000000000..094542dd722b7 --- /dev/null +++ b/substrate/frame/election-provider-multi-block/src/lib.rs @@ -0,0 +1,2627 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Multi-phase, multi-block, election provider pallet. +//! +//! > This pallet is sometimes abbreviated as `EPMB`, and `pallet_election_provider_multi_phase` as +//! > `EPM`. +//! +//! ## Overall idea +//! +//! `pallet_election_provider_multi_phase` provides the basic ability for NPoS solutions to be +//! computed offchain (essentially anywhere) and submitted back to the chain as signed or unsigned +//! transaction, with sensible configurations and fail-safe mechanisms to ensure system safety. +//! Nonetheless, it has a limited capacity in terms of number of voters it can process in a **single +//! block**. +//! +//! 
This pallet takes `EPM` system, keeps most of its ideas and core premises, and extends it to +//! support paginated, multi-block operations. The final goal of this pallet is to scale linearly +//! with the number of blocks allocated to the elections. Moreover, the amount of work that it does +//! in one block should be bounded and measurable, making it suitable for a parachain. In principle, +//! with large enough blocks (in a dedicated parachain), the number of voters included in the NPoS +//! system can grow significantly (yet, obviously not indefinitely). +//! +//! Note that this pallet does not consider how the recipient is processing the results. To ensure +//! scalability, the recipient of this pallet's data (i.e. `pallet-staking`) must also be capable of +//! pagination and multi-block processing. +//! +//! ## Companion pallets +//! +//! This pallet will only function in a sensible way if it is peered with its companion pallets. +//! +//! - The [`verifier`] pallet provides a standard implementation of the [`verifier::Verifier`]. This +//! pallet is mandatory. +//! - The [`unsigned`] module provides the implementation of unsigned submission by validators. If +//! this pallet is included, then [`Config::UnsignedPhase`] will determine its duration. +//! - The [`signed`] module provides the implementation of the signed submission by any account. If +//! this pallet is included, the combined [`Config::SignedPhase`] and +//! [`Config::SignedValidationPhase`] will determine its duration +//! +//! These pallets are in fact hierarchical. This particular one is the top level one. It contains +//! the shared information that all child pallets use. All child pallets depend on the top level +//! pallet ONLY, but not the other way around. For those cases, traits are used. +//! +//! As in, notice that [`crate::verifier::Config`] relies on [`crate::Config`], but for the +//! reverse, we rely on [`crate::verifier::Verifier`] trait, which is indeed part of +//! 
[`crate::Config`]. This is merely an implementation opinion. +//! +//! ### Pallet Ordering: +//! +//! The ordering of these pallets in a runtime should be: +//! 1. parent +//! 2. verifier +//! 3. signed +//! 4. unsigned +//! +//! This is critical for the phase transition to work. +//! +//! > This should be manually checked, there is not automated way to test it. +//! +//! ## Pagination +//! +//! Most of the external APIs of this pallet are paginated. All pagination follow a pattern where if +//! `N` pages exist, the first paginated call is `function(N-1)` and the last one is `function(0)`. +//! For example, with 3 pages, the `elect` of [`ElectionProvider`] is expected to be called as +//! `elect(2) -> elect(1) -> elect(0)`. In essence, calling a paginated function with index 0 is +//! always a signal of termination, meaning that no further calls will follow. +//! +//! The snapshot creation for voters (Nominators in staking), submission of signed pages, validation +//! of signed solutions and exporting of pages are all paginated. Note that this pallet is yet to +//! support paginated target (Validators in staking) snapshotting. +//! +//! ### Terminology Note: `msp` and `lsp` +//! +//! Stand for _most significant page_ (n-1) and _least significant page_ (0). +//! +//! See [`ElectionProvider::msp`] and [`ElectionProvider::lsp`], and their usage. +//! +//! ## Phases +//! +//! The operations in this pallet are divided intor rounds, a `u32` number stored in [`Round`]. +//! This value helps this pallet organize itself, and leaves the door open for lazy deletion of any +//! stale data. A round, under the happy path, starts by receiving the call to +//! [`ElectionProvider::start`], and is terminated by receiving a call to +//! [`ElectionProvider::elect`] with value 0. +//! +//! The timeline of pallet is overall as follows: +//! +//! ```ignore +//! < Off > +//! 0 ------------ 12 13 14 15 ----------- 20 ---------25 ------- 30 +//! | | | | | +//! 
Snapshot Signed SignedValidation Unsigned Elect +//! ``` +//! +//! * Duration of `Snapshot` is determined by [`Config::Pages`] + 1. +//! * Whereby in the first page we take the "Targets" snapshot, and in the subsequent pages we take +//! the voter snapshot. +//! * For example, with `Pages = 4`: +//! * `Snapshot(4)` -> `Targets(all)` +//! * `Snapshot(3)` -> `Voters(3)` +//! * `Snapshot(2)` -> `Voters(2)` +//! * `Snapshot(1)` -> `Voters(1)` +//! * `Snapshot(0)` -> `Voters(0)` +//! * Duration of `Signed`, `SignedValidation` and `Unsigned` are determined by +//! [`Config::SignedPhase`], [`Config::SignedValidationPhase`] and [`Config::UnsignedPhase`] +//! respectively. +//! * [`Config::Pages`] calls to elect are expected, but all in all the pallet will close a round +//! once `elect(0)` is called. +//! +//! > Given this, it is rather important for the user of this pallet to ensure it always terminates +//! > election via `elect` before requesting a new one. +//! +//! ## Feasible Solution (correct solution) +//! +//! All submissions must undergo a feasibility check. Signed solutions are checked one by one at the +//! end of the signed phase, and the unsigned solutions are checked on the spot. A feasible solution +//! is as follows: +//! +//! 0. **all** of the used indices must be correct. +//! 1. present *exactly* correct number of winners. +//! 2. any assignment is checked to match with `PagedVoterSnapshot`. +//! 3. the claimed score is valid, based on the fixed point arithmetic accuracy. +//! +//! More about this in [`verifier`], who is responsible for doing all of the above. +//! +//! ### Fallback and Emergency +//! +//! If at any page, [`ElectionProvider::elect`] fails, a call with the same page-index is dispatched +//! to [`Config::Fallback`]. [`Config::Fallback`] is itself (yet) another implementation of +//! [`ElectionProvider`], and can decide to do anything, but a few reasonable options are provided +//! here: +//! +//! 1. Do nothing: [`Continue`] +//! 2. 
Force us into the emergency phase: [`crate::InitiateEmergencyPhase`]. This initiates +//! [`Phase::Emergency`], which will halt almost all operations of this pallet, and it can only +//! be recovered by [`AdminOperation`], dispatched via [`Call::manage`]. +//! 3. compute an onchain from the give page of snapshot. +//! +//! Note that configuring the fallback to be onchain computation is not recommended, unless for +//! test-nets for a number of reasons: +//! +//! 1. The solution score of fallback is never checked to match the "minimum" score. That being +//! said, the computation happens onchain so we can trust it. +//! 2. The onchain fallback runs on the same number of voters and targets that reside on a single +//! page of a snapshot, which will very likely be too much for actual onchain computation. Yet, +//! we don't have another choice as we cannot request another smaller snapshot from the data +//! provider mid-election without more bookkeeping on the staking side. +//! +//! If onchain solution is to be seriously considered, an improvement to this pallet should +//! re-request a smaller set of voters from `T::DataProvider` in a stateless manner. +//! +//! ### Signed Phase +//! +//! Signed phase is when an offchain miner, aka, `polkadot-staking-miner` should operate upon. See +//! [`signed`] for more information. +//! +//! ## Unsigned Phase +//! +//! Unsigned phase is a built-in fallback in which validators may submit a single page election, +//! taking into account only the [`ElectionProvider::msp`] (_most significant page_). See +//! [`crate::unsigned`] for more information. + +// Implementation notes: +// +// - Naming convention is: `${singular}_page` for singular, e.g. `voter_page` for `Vec`. +// `paged_${plural}` for plural, e.g. `paged_voters` for `Vec>`. +// +// - Since this crate has multiple `Pallet` and `Configs`, in each sub-pallet, we only reference the +// local `Pallet` without a prefix and allow it to be imported via `use`. 
Avoid `super::Pallet` +// except for the case of a modules that want to reference their local `Pallet` . The +// `crate::Pallet` is always reserved for the parent pallet. Other sibling pallets must be +// referenced with full path, e.g. `crate::Verifier::Pallet`. Do NOT write something like `use +// unsigned::Pallet as UnsignedPallet`. +// +// - Respecting private storage items with wrapper We move all implementations out of the `mod +// pallet` as much as possible to ensure we NEVER access the internal storage items directly. All +// operations should happen with the wrapper types. + +#![cfg_attr(not(feature = "std"), no_std)] + +use crate::types::*; +use codec::{Decode, Encode, MaxEncodedLen}; +use frame_election_provider_support::{ + onchain, BoundedSupportsOf, DataProviderBounds, ElectionDataProvider, ElectionProvider, + InstantElectionProvider, +}; +use frame_support::{ + pallet_prelude::*, + traits::{Defensive, EnsureOrigin}, + DebugNoBound, Twox64Concat, +}; +use frame_system::pallet_prelude::*; +use scale_info::TypeInfo; +use sp_arithmetic::{ + traits::{CheckedAdd, Zero}, + PerThing, UpperOf, +}; +use sp_npos_elections::VoteWeight; +use sp_runtime::{ + traits::{Hash, Saturating}, + SaturatedConversion, +}; +use sp_std::{borrow::ToOwned, boxed::Box, prelude::*}; +use verifier::Verifier; + +#[cfg(test)] +mod mock; +#[macro_use] +pub mod helpers; +#[cfg(feature = "runtime-benchmarks")] +pub mod benchmarking; + +/// The common logginv prefix of all pallets in this crate. +pub const LOG_PREFIX: &'static str = "runtime::multiblock-election"; + +macro_rules! 
clear_paged_map { + ($map: ty) => {{ + let __r = <$map>::clear(u32::MAX, None); + debug_assert!(__r.unique <= T::Pages::get(), "clearing map caused too many removals") + }}; +} + +/// The signed pallet +pub mod signed; +/// Common types of the pallet +pub mod types; +/// The unsigned pallet +pub mod unsigned; +/// The verifier pallet +pub mod verifier; +/// The weight module +pub mod weights; + +pub use pallet::*; +pub use types::*; +pub use weights::measured::pallet_election_provider_multi_block::WeightInfo; + +/// A fallback implementation that transitions the pallet to the emergency phase. +pub struct InitiateEmergencyPhase(sp_std::marker::PhantomData); +impl ElectionProvider for InitiateEmergencyPhase { + type AccountId = T::AccountId; + type BlockNumber = BlockNumberFor; + type DataProvider = T::DataProvider; + type Error = &'static str; + type Pages = T::Pages; + type MaxBackersPerWinner = ::MaxBackersPerWinner; + type MaxWinnersPerPage = ::MaxWinnersPerPage; + + fn elect(_page: PageIndex) -> Result, Self::Error> { + Pallet::::phase_transition(Phase::Emergency); + Err("Emergency phase started.") + } + + fn status() -> Result { + Ok(true) + } + + fn start() -> Result<(), Self::Error> { + Ok(()) + } + + fn duration() -> Self::BlockNumber { + Zero::zero() + } +} + +impl InstantElectionProvider for InitiateEmergencyPhase { + fn instant_elect( + _voters: Vec>, + _targets: Vec, + _desired_targets: u32, + ) -> Result, Self::Error> { + Self::elect(0) + } + + fn bother() -> bool { + false + } +} + +/// A fallback implementation that silently continues into the next page. +/// +/// This is suitable for onchain usage. 
+pub struct Continue(sp_std::marker::PhantomData); +impl ElectionProvider for Continue { + type AccountId = T::AccountId; + type BlockNumber = BlockNumberFor; + type DataProvider = T::DataProvider; + type Error = &'static str; + type Pages = T::Pages; + type MaxBackersPerWinner = ::MaxBackersPerWinner; + type MaxWinnersPerPage = ::MaxWinnersPerPage; + + fn elect(_page: PageIndex) -> Result, Self::Error> { + log!(warn, "'Continue' fallback will do nothing"); + Err("'Continue' fallback will do nothing") + } + + fn start() -> Result<(), Self::Error> { + Ok(()) + } + + fn duration() -> Self::BlockNumber { + Zero::zero() + } + + fn status() -> Result { + Ok(true) + } +} + +impl InstantElectionProvider for Continue { + fn instant_elect( + _voters: Vec>, + _targets: Vec, + _desired_targets: u32, + ) -> Result, Self::Error> { + Self::elect(0) + } + + fn bother() -> bool { + false + } +} + +/// A easy means to configure [`Config::AreWeDone`]. +/// +/// With this, you can say what to do if a solution is queued, or what to do if not. +/// +/// Two common shorthands of this are provided: +/// * [`ProceedRegardlessOf`] +/// * [`RevertToSignedIfNotQueuedOf`] +pub struct IfSolutionQueuedElse( + sp_std::marker::PhantomData<(T, Queued, NotQueued)>, +); + +/// A `Get` impl for `Phase::Done` +pub struct GetDone(sp_std::marker::PhantomData); +impl Get> for GetDone { + fn get() -> Phase { + Phase::Done + } +} + +/// A `Get` impl for `Phase::Signed(T::SignedPhase::get())` +pub struct GetSigned(sp_std::marker::PhantomData); +impl Get> for GetSigned { + fn get() -> Phase { + Phase::Signed(T::SignedPhase::get().saturating_sub(1u32.into())) + } +} + +/// A shorthand for [`IfSolutionQueuedElse`] that proceeds regardless of the solution being queued. +pub type ProceedRegardlessOf = IfSolutionQueuedElse, GetDone>; + +/// A shorthand for [`IfSolutionQueuedElse`] that proceeds to `Phase::Done` if the solution is +/// queued. Otherwise, it proceeds to `Phase::Signed`. 
+pub type RevertToSignedIfNotQueuedOf = IfSolutionQueuedElse, GetSigned>; + +impl IfSolutionQueuedElse { + fn something_queued() -> bool { + let queued_score = ::queued_score().is_some(); + #[cfg(debug_assertions)] + { + let any_pages_queued = (Pallet::::lsp()..=Pallet::::msp()).any(|p| { + ::get_queued_solution_page(p).is_some() + }); + assert_eq!( + queued_score, any_pages_queued, + "queued score ({}) and queued pages ({}) must match", + queued_score, any_pages_queued + ); + } + queued_score + } +} + +impl>, NotQueued: Get>> Get> + for IfSolutionQueuedElse +{ + fn get() -> Phase { + if Self::something_queued() { + Queued::get() + } else { + NotQueued::get() + } + } +} + +/// Internal errors of the pallet. This is used in the implementation of [`ElectionProvider`]. +/// +/// Note that this is different from [`pallet::Error`]. +#[derive( + frame_support::DebugNoBound, frame_support::PartialEqNoBound, frame_support::EqNoBound, +)] +pub enum ElectionError { + /// An error happened in the feasibility check sub-system. + Feasibility(verifier::FeasibilityError), + /// An error in the fallback. + Fallback(FallbackErrorOf), + /// An error in the onchain seq-phragmen implementation + OnChain(onchain::Error), + /// An error happened in the data provider. + DataProvider(&'static str), + /// the corresponding page in the queued supports is not available. + SupportPageNotAvailable, + /// The election is not ongoing and therefore no results may be queried. + NotOngoing, + /// The election is currently ongoing, and therefore we cannot start again. + Ongoing, + /// Other misc error + Other(&'static str), +} + +impl From for ElectionError { + fn from(e: onchain::Error) -> Self { + ElectionError::OnChain(e) + } +} + +impl From for ElectionError { + fn from(e: verifier::FeasibilityError) -> Self { + ElectionError::Feasibility(e) + } +} + +/// Different operations that the [`Config::AdminOrigin`] can perform on the pallet. 
+#[derive( + Encode, + Decode, + DecodeWithMemTracking, + MaxEncodedLen, + TypeInfo, + DebugNoBound, + CloneNoBound, + PartialEqNoBound, + EqNoBound, +)] +#[codec(mel_bound(T: Config))] +#[scale_info(skip_type_params(T))] +pub enum AdminOperation { + /// Forcefully go to the next round, starting from the Off Phase. + ForceRotateRound, + /// Force-set the phase to the given phase. + /// + /// This can have many many combinations, use only with care and sufficient testing. + ForceSetPhase(Phase), + /// Set the given (single page) emergency solution. + /// + /// Can only be called in emergency phase. + EmergencySetSolution(Box>>, ElectionScore), + /// Trigger the (single page) fallback in `instant` mode, with the given parameters, and + /// queue it if correct. + /// + /// Can only be called in emergency phase. + EmergencyFallback, + /// Set the minimum untrusted score. This is directly communicated to the verifier component to + /// be taken into account. + /// + /// This is useful in preventing any serious issue where due to a bug we accept a very bad + /// solution. + SetMinUntrustedScore(ElectionScore), +} + +#[frame_support::pallet] +pub mod pallet { + use super::*; + #[pallet::config] + pub trait Config: frame_system::Config { + /// Duration of the unsigned phase. + #[pallet::constant] + type UnsignedPhase: Get>; + /// Duration of the signed phase. + #[pallet::constant] + type SignedPhase: Get>; + /// Duration of the singed validation phase. + /// + /// The duration of this should not be less than `T::Pages`, and there is no point in it + /// being more than `SignedPhase::MaxSubmission::get() * T::Pages`. TODO: integrity test for + /// it. + #[pallet::constant] + type SignedValidationPhase: Get>; + + /// The number of snapshot voters to fetch per block. + #[pallet::constant] + type VoterSnapshotPerBlock: Get; + + /// The number of snapshot targets to fetch per block. + #[pallet::constant] + type TargetSnapshotPerBlock: Get; + + /// The number of pages. 
+ /// + /// The snapshot is created with this many keys in the storage map. + /// + /// The solutions may contain at MOST this many pages, but less pages are acceptable as + /// well. + #[pallet::constant] + type Pages: Get; + + /// Something that will provide the election data. + type DataProvider: ElectionDataProvider< + AccountId = Self::AccountId, + BlockNumber = BlockNumberFor, + >; + + /// The miner configuration. + /// + /// These configurations are passed to [`crate::unsigned::miner::BaseMiner`]. An external + /// miner implementation should implement this trait, and use the said `BaseMiner`. + type MinerConfig: crate::unsigned::miner::MinerConfig< + Pages = Self::Pages, + AccountId = ::AccountId, + MaxVotesPerVoter = ::MaxVotesPerVoter, + VoterSnapshotPerBlock = Self::VoterSnapshotPerBlock, + TargetSnapshotPerBlock = Self::TargetSnapshotPerBlock, + MaxBackersPerWinner = ::MaxBackersPerWinner, + MaxWinnersPerPage = ::MaxWinnersPerPage, + >; + + /// The fallback type used for the election. + type Fallback: InstantElectionProvider< + AccountId = Self::AccountId, + BlockNumber = BlockNumberFor, + DataProvider = Self::DataProvider, + MaxBackersPerWinner = ::MaxBackersPerWinner, + MaxWinnersPerPage = ::MaxWinnersPerPage, + >; + + /// The verifier pallet's interface. + type Verifier: verifier::Verifier< + Solution = SolutionOf, + AccountId = Self::AccountId, + > + verifier::AsynchronousVerifier; + + /// The origin that can perform administration operations on this pallet. + type AdminOrigin: EnsureOrigin; + + /// An indicator of whether we should move to do the [`crate::types::Phase::Done`] or not? + /// This is called at the end of the election process. + /// + /// Common implementation is [`ProceedRegardlessOf`] or [`RevertToSignedIfNotQueuedOf`]. + type AreWeDone: Get>; + + /// The weight of the pallet. + type WeightInfo: WeightInfo; + } + + #[pallet::call] + impl Pallet { + /// Manage this pallet. 
+ /// + /// The origin of this call must be [`Config::AdminOrigin`]. + /// + /// See [`AdminOperation`] for various operations that are possible. + #[pallet::weight(T::WeightInfo::manage())] + #[pallet::call_index(0)] + pub fn manage(origin: OriginFor, op: AdminOperation) -> DispatchResultWithPostInfo { + use crate::verifier::Verifier; + use sp_npos_elections::EvaluateSupport; + + let _ = T::AdminOrigin::ensure_origin(origin); + match op { + AdminOperation::EmergencyFallback => { + ensure!(Self::current_phase() == Phase::Emergency, Error::::UnexpectedPhase); + // note: for now we run this on the msp, but we can make it configurable if need + // be. + let voters = Snapshot::::voters(Self::msp()).ok_or(Error::::Snapshot)?; + let targets = Snapshot::::targets().ok_or(Error::::Snapshot)?; + let desired_targets = + Snapshot::::desired_targets().ok_or(Error::::Snapshot)?; + let fallback = T::Fallback::instant_elect( + voters.into_inner(), + targets.into_inner(), + desired_targets, + ) + .map_err(|e| { + log!(warn, "Fallback failed: {:?}", e); + Error::::Fallback + })?; + let score = fallback.evaluate(); + T::Verifier::force_set_single_page_valid(fallback, 0, score); + Ok(().into()) + }, + AdminOperation::EmergencySetSolution(supports, score) => { + ensure!(Self::current_phase() == Phase::Emergency, Error::::UnexpectedPhase); + // TODO: hardcoding zero here doesn't make a lot of sense + T::Verifier::force_set_single_page_valid(*supports, 0, score); + Ok(().into()) + }, + AdminOperation::ForceSetPhase(phase) => { + Self::phase_transition(phase); + Ok(().into()) + }, + AdminOperation::ForceRotateRound => { + Self::rotate_round(); + Ok(().into()) + }, + AdminOperation::SetMinUntrustedScore(score) => { + T::Verifier::set_minimum_score(score); + Ok(().into()) + }, + } + } + } + + #[pallet::hooks] + impl Hooks> for Pallet { + fn on_initialize(_now: BlockNumberFor) -> Weight { + let current_phase = CurrentPhase::::get(); + let weight1 = match current_phase { + Phase::Snapshot(x) 
if x == T::Pages::get() => { + // create the target snapshot + Self::create_targets_snapshot().defensive_unwrap_or_default(); + T::WeightInfo::on_initialize_into_snapshot_msp() + }, + Phase::Snapshot(x) => { + // create voter snapshot + Self::create_voters_snapshot_paged(x).unwrap(); + T::WeightInfo::on_initialize_into_snapshot_rest() + }, + _ => T::WeightInfo::on_initialize_nothing(), + }; + + // in all cases, go to next phase + let next_phase = current_phase.next(); + + let weight2 = match next_phase { + Phase::Signed(_) => T::WeightInfo::on_initialize_into_signed(), + Phase::SignedValidation(_) => T::WeightInfo::on_initialize_into_signed_validation(), + Phase::Unsigned(_) => T::WeightInfo::on_initialize_into_unsigned(), + _ => T::WeightInfo::on_initialize_nothing(), + }; + + Self::phase_transition(next_phase); + + // bit messy, but for now this works best. + #[cfg(test)] + { + let test_election_start: BlockNumberFor = + (crate::mock::ElectionStart::get() as u32).into(); + if _now == test_election_start { + crate::log!(info, "TESTING: Starting election at block {}", _now); + crate::mock::MultiBlock::start().unwrap(); + } + } + + weight1 + weight2 + } + + fn integrity_test() { + use sp_std::mem::size_of; + // The index type of both voters and targets need to be smaller than that of usize (very + // unlikely to be the case, but anyhow). + assert!(size_of::>() <= size_of::()); + assert!(size_of::>() <= size_of::()); + + // also, because `VoterSnapshotPerBlock` and `TargetSnapshotPerBlock` are in u32, we + // assert that both of these types are smaller than u32 as well. + assert!(size_of::>() <= size_of::()); + assert!(size_of::>() <= size_of::()); + + // pages must be at least 1. + assert!(T::Pages::get() > 0); + + // Based on the requirements of [`sp_npos_elections::Assignment::try_normalize`]. + let max_vote: usize = as NposSolution>::LIMIT; + + // 2. Maximum sum of [SolutionAccuracy; 16] must fit into `UpperOf`. + let maximum_chain_accuracy: Vec>> = (0.. 
+ max_vote) + .map(|_| { + >>::from( + >::one().deconstruct(), + ) + }) + .collect(); + let _: UpperOf> = maximum_chain_accuracy + .iter() + .fold(Zero::zero(), |acc, x| acc.checked_add(x).unwrap()); + + // We only accept data provider who's maximum votes per voter matches our + // `T::Solution`'s `LIMIT`. + // + // NOTE that this pallet does not really need to enforce this in runtime. The + // solution cannot represent any voters more than `LIMIT` anyhow. + assert_eq!( + ::MaxVotesPerVoter::get(), + as NposSolution>::LIMIT as u32, + ); + + // Either (signed + signed validation) is non-zero, or unsigned is non-zero + let has_signed = !T::SignedPhase::get().is_zero(); + let signed_validation = T::SignedValidationPhase::get(); + let has_signed_validation = !signed_validation.is_zero(); + let has_unsigned = !T::UnsignedPhase::get().is_zero(); + assert!( + has_signed == has_signed_validation, + "Signed phase not set correct -- both should be set or unset" + ); + assert!( + signed_validation.is_zero() || signed_validation >= T::Pages::get().into(), + "signed validation phase should be at least as long as the number of pages." + ); + + assert!(has_signed || has_unsigned, "either signed or unsigned phase must be set"); + } + + #[cfg(feature = "try-runtime")] + fn try_state(now: BlockNumberFor) -> Result<(), sp_runtime::TryRuntimeError> { + Self::do_try_state(now).map_err(Into::into) + } + } + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// A phase transition happened. Only checks major changes in the variants, not minor inner + /// values. + PhaseTransitioned { + /// the source phase + from: Phase, + /// The target phase + to: Phase, + }, + } + + /// Error of the pallet that can be returned in response to dispatches. + #[pallet::error] + pub enum Error { + /// Triggering the `Fallback` failed. + Fallback, + /// Unexpected phase + UnexpectedPhase, + /// Snapshot was unavailable. 
+ Snapshot, + } + + /// Common errors in all sub-pallets and miner. + #[derive(PartialEq, Eq, Clone, Encode, Decode, Debug)] + pub enum CommonError { + /// Submission is too early (or too late, depending on your point of reference). + EarlySubmission, + /// The round counter is wrong. + WrongRound, + /// Submission is too weak to be considered an improvement. + WeakSubmission, + /// Wrong number of pages in the solution. + WrongPageCount, + /// Wrong number of winners presented. + WrongWinnerCount, + /// The snapshot fingerprint is not a match. The solution is likely outdated. + WrongFingerprint, + /// Snapshot was not available. + Snapshot, + } + + /// Internal counter for the number of rounds. + /// + /// This is useful for de-duplication of transactions submitted to the pool, and general + /// diagnostics of the pallet. + /// + /// This is merely incremented once per every time that an upstream `elect` is called. + #[pallet::storage] + #[pallet::getter(fn round)] + pub type Round = StorageValue<_, u32, ValueQuery>; + + /// Current phase. + #[pallet::storage] + #[pallet::getter(fn current_phase)] + pub type CurrentPhase = StorageValue<_, Phase, ValueQuery>; + + /// Wrapper struct for working with snapshots. + /// + /// It manages the following storage items: + /// + /// - `DesiredTargets`: The number of targets that we wish to collect. + /// - `PagedVoterSnapshot`: Paginated map of voters. + /// - `PagedVoterSnapshotHash`: Hash of the aforementioned. + /// - `PagedTargetSnapshot`: Paginated map of targets. + /// - `PagedTargetSnapshotHash`: Hash of the aforementioned. + /// + /// ### Invariants + /// + /// The following invariants must be met at **all times** for this storage item to be "correct". + /// + /// - `PagedVoterSnapshotHash` must always contain the correct the same number of keys, and the + /// corresponding hash of the `PagedVoterSnapshot`. 
+ /// - `PagedTargetSnapshotHash` must always contain the correct the same number of keys, and the + /// corresponding hash of the `PagedTargetSnapshot`. + /// + /// - If any page from the paged voters/targets exists, then the aforementioned (desired + /// targets) must also exist. + /// + /// The following invariants might need to hold based on the current phase. + /// + /// - If `Phase` IS `Snapshot(_)`, then partial voter/target pages must exist from `msp` to + /// `lsp` based on the inner value. + /// - If `Phase` IS `Off`, then, no snapshot must exist. + /// - In all other phases, the snapshot must FULLY exist. + pub(crate) struct Snapshot(sp_std::marker::PhantomData); + impl Snapshot { + // ----------- mutable methods + pub(crate) fn set_desired_targets(d: u32) { + DesiredTargets::::put(d); + } + + pub(crate) fn set_targets(targets: BoundedVec) { + let hash = Self::write_storage_with_pre_allocate( + &PagedTargetSnapshot::::hashed_key_for(Pallet::::msp()), + targets, + ); + PagedTargetSnapshotHash::::insert(Pallet::::msp(), hash); + } + + pub(crate) fn set_voters(page: PageIndex, voters: VoterPageOf) { + let hash = Self::write_storage_with_pre_allocate( + &PagedVoterSnapshot::::hashed_key_for(page), + voters, + ); + PagedVoterSnapshotHash::::insert(page, hash); + } + + /// Destroy the entire snapshot. + /// + /// Should be called only once we transition to [`Phase::Off`]. + pub(crate) fn kill() { + DesiredTargets::::kill(); + clear_paged_map!(PagedVoterSnapshot::); + clear_paged_map!(PagedVoterSnapshotHash::); + clear_paged_map!(PagedTargetSnapshot::); + clear_paged_map!(PagedTargetSnapshotHash::); + } + + // ----------- non-mutables + pub(crate) fn desired_targets() -> Option { + DesiredTargets::::get() + } + + pub(crate) fn voters(page: PageIndex) -> Option> { + PagedVoterSnapshot::::get(page) + } + + pub(crate) fn targets() -> Option> { + // NOTE: targets always have one index, which is 0, aka lsp. 
+ PagedTargetSnapshot::::get(Pallet::::msp()) + } + + /// Get a fingerprint of the snapshot, from all the hashes that are stored for each page of + /// the snapshot. + /// + /// This is computed as: `(target_hash, voter_hash_n, voter_hash_(n-1), ..., voter_hash_0)` + /// where `n` is `T::Pages - 1`. In other words, it is the concatenated hash of targets, and + /// voters, from `msp` to `lsp`. + pub fn fingerprint() -> T::Hash { + let mut hashed_target_and_voters = + Self::targets_hash().unwrap_or_default().as_ref().to_vec(); + let hashed_voters = (Pallet::::msp()..=Pallet::::lsp()) + .map(|i| PagedVoterSnapshotHash::::get(i).unwrap_or_default()) + .flat_map(|hash| >::as_ref(&hash).to_owned()) + .collect::>(); + hashed_target_and_voters.extend(hashed_voters); + T::Hashing::hash(&hashed_target_and_voters) + } + + fn write_storage_with_pre_allocate(key: &[u8], data: E) -> T::Hash { + let size = data.encoded_size(); + let mut buffer = Vec::with_capacity(size); + data.encode_to(&mut buffer); + + let hash = T::Hashing::hash(&buffer); + + // do some checks. + debug_assert_eq!(buffer, data.encode()); + // buffer should have not re-allocated since. + debug_assert!(buffer.len() == size && size == buffer.capacity()); + sp_io::storage::set(key, &buffer); + + hash + } + + pub(crate) fn targets_hash() -> Option { + PagedTargetSnapshotHash::::get(Pallet::::msp()) + } + } + + #[allow(unused)] + #[cfg(any(test, feature = "runtime-benchmarks", feature = "try-runtime"))] + impl Snapshot { + ///Ensure target snapshot exists. + pub(crate) fn ensure_target_snapshot(exists: bool) -> Result<(), &'static str> { + ensure!(exists ^ Self::desired_targets().is_none(), "desired target mismatch"); + ensure!(exists ^ Self::targets().is_none(), "targets mismatch"); + ensure!(exists ^ Self::targets_hash().is_none(), "targets hash mismatch"); + + // and the hash is correct. 
+ if let Some(targets) = Self::targets() { + let hash = Self::targets_hash().expect("must exist; qed"); + ensure!(hash == T::Hashing::hash(&targets.encode()), "targets hash mismatch"); + } + Ok(()) + } + + /// Ensure voters exists, from page `T::Pages::get()` for `up_to_page` subsequent pages. + pub(crate) fn ensure_voter_snapshot( + exists: bool, + mut up_to_page: PageIndex, + ) -> Result<(), &'static str> { + up_to_page = up_to_page.min(T::Pages::get()); + // ensure that voter pages that should exist, indeed to exist.. + let mut sum_existing_voters: usize = 0; + for p in (crate::Pallet::::lsp()..=crate::Pallet::::msp()) + .rev() + .take(up_to_page as usize) + { + ensure!( + (exists ^ Self::voters(p).is_none()) && + (exists ^ Self::voters_hash(p).is_none()), + "voter page existence mismatch" + ); + + if let Some(voters_page) = Self::voters(p) { + sum_existing_voters = sum_existing_voters.saturating_add(voters_page.len()); + let hash = Self::voters_hash(p).expect("must exist; qed"); + ensure!(hash == T::Hashing::hash(&voters_page.encode()), "voter hash mismatch"); + } + } + + // ..and those that should not exist, indeed DON'T. + for p in (crate::Pallet::::lsp()..=crate::Pallet::::msp()) + .take((T::Pages::get() - up_to_page) as usize) + { + ensure!( + (exists ^ Self::voters(p).is_some()) && + (exists ^ Self::voters_hash(p).is_some()), + "voter page non-existence mismatch" + ); + } + Ok(()) + } + + pub(crate) fn ensure_snapshot( + exists: bool, + mut up_to_page: PageIndex, + ) -> Result<(), &'static str> { + Self::ensure_target_snapshot(exists) + .and_then(|_| Self::ensure_voter_snapshot(exists, up_to_page)) + } + + pub(crate) fn ensure_full_snapshot() -> Result<(), &'static str> { + // if any number of pages supposed to exist, these must also exist. 
+ ensure!(Self::desired_targets().is_some(), "desired target mismatch"); + ensure!(Self::targets_hash().is_some(), "targets hash mismatch"); + ensure!( + Self::targets_decode_len().unwrap_or_default() as u32 == + T::TargetSnapshotPerBlock::get(), + "targets decode length mismatch" + ); + + // ensure that voter pages that should exist, indeed to exist.. + for p in crate::Pallet::::lsp()..=crate::Pallet::::msp() { + ensure!( + Self::voters_hash(p).is_some() && + Self::voters_decode_len(p).unwrap_or_default() as u32 == + T::VoterSnapshotPerBlock::get(), + "voter page existence mismatch" + ); + } + + Ok(()) + } + + pub(crate) fn voters_decode_len(page: PageIndex) -> Option { + PagedVoterSnapshot::::decode_len(page) + } + + pub(crate) fn targets_decode_len() -> Option { + PagedTargetSnapshot::::decode_len(Pallet::::msp()) + } + + pub(crate) fn voters_hash(page: PageIndex) -> Option { + PagedVoterSnapshotHash::::get(page) + } + + pub(crate) fn sanity_check() -> Result<(), &'static str> { + // check the snapshot existence based on the phase. This checks all of the needed + // conditions except for the metadata values. + let phase = Pallet::::current_phase(); + let _ = match phase { + // no page should exist in this phase. + Phase::Off => Self::ensure_snapshot(false, T::Pages::get()), + + // we will star the snapshot in the next phase. + Phase::Snapshot(p) if p == T::Pages::get() => + Self::ensure_snapshot(false, T::Pages::get()), + // we are mid voter snapshot. + Phase::Snapshot(p) if p < T::Pages::get() && p > 0 => + Self::ensure_snapshot(true, T::Pages::get() - p - 1), + // we cannot check anything in this block -- we take the last page of the snapshot. + Phase::Snapshot(_) => Ok(()), + + // full snapshot must exist in these phases. 
+ Phase::Emergency | + Phase::Signed(_) | + Phase::SignedValidation(_) | + Phase::Export(_) | + Phase::Done | + Phase::Unsigned(_) => Self::ensure_snapshot(true, T::Pages::get()), + }?; + + Ok(()) + } + } + + #[cfg(test)] + impl Snapshot { + pub(crate) fn voter_pages() -> PageIndex { + use sp_runtime::SaturatedConversion; + PagedVoterSnapshot::::iter().count().saturated_into::() + } + + pub(crate) fn target_pages() -> PageIndex { + use sp_runtime::SaturatedConversion; + PagedTargetSnapshot::::iter().count().saturated_into::() + } + + pub(crate) fn voters_iter_flattened() -> impl Iterator> { + let key_range = + (crate::Pallet::::lsp()..=crate::Pallet::::msp()).collect::>(); + key_range + .into_iter() + .flat_map(|k| PagedVoterSnapshot::::get(k).unwrap_or_default()) + } + + pub(crate) fn remove_voter_page(page: PageIndex) { + PagedVoterSnapshot::::remove(page); + } + + pub(crate) fn kill_desired_targets() { + DesiredTargets::::kill(); + } + + pub(crate) fn remove_target_page() { + PagedTargetSnapshot::::remove(Pallet::::msp()); + } + + pub(crate) fn remove_target(at: usize) { + PagedTargetSnapshot::::mutate(crate::Pallet::::msp(), |maybe_targets| { + if let Some(targets) = maybe_targets { + targets.remove(at); + // and update the hash. + PagedTargetSnapshotHash::::insert( + crate::Pallet::::msp(), + T::Hashing::hash(&targets.encode()), + ) + } else { + unreachable!(); + } + }) + } + } + + /// Desired number of targets to elect for this round. + #[pallet::storage] + type DesiredTargets = StorageValue<_, u32>; + /// Paginated voter snapshot. At most [`T::Pages`] keys will exist. + #[pallet::storage] + type PagedVoterSnapshot = + StorageMap<_, Twox64Concat, PageIndex, VoterPageOf>; + /// Same as [`PagedVoterSnapshot`], but it will store the hash of the snapshot. + /// + /// The hash is generated using [`frame_system::Config::Hashing`]. + #[pallet::storage] + type PagedVoterSnapshotHash = StorageMap<_, Twox64Concat, PageIndex, T::Hash>; + /// Paginated target snapshot. 
+ /// + /// For the time being, since we assume one pages of targets, at most ONE key will exist. + #[pallet::storage] + type PagedTargetSnapshot = + StorageMap<_, Twox64Concat, PageIndex, BoundedVec>; + /// Same as [`PagedTargetSnapshot`], but it will store the hash of the snapshot. + /// + /// The hash is generated using [`frame_system::Config::Hashing`]. + #[pallet::storage] + type PagedTargetSnapshotHash = StorageMap<_, Twox64Concat, PageIndex, T::Hash>; + + #[pallet::pallet] + pub struct Pallet(PhantomData); +} + +impl Pallet { + /// Returns the most significant page of the snapshot. + /// + /// Based on the contract of `ElectionDataProvider`, this is the first page that is filled. + fn msp() -> PageIndex { + T::Pages::get().checked_sub(1).defensive_unwrap_or_default() + } + + /// Returns the least significant page of the snapshot. + /// + /// Based on the contract of `ElectionDataProvider`, this is the last page that is filled. + fn lsp() -> PageIndex { + Zero::zero() + } + + /// Return the `length` most significant pages. + /// + /// For example, if `Pages = 4`, and `length = 2`, our full snapshot range would be [0, + /// 1, 2, 3], with 3 being msp. But, in this case, then this returns `[2, 3]` two most + /// significant pages, in the old order. + pub fn msp_range_for(length: usize) -> Vec { + (Self::lsp()..Self::msp() + 1).rev().take(length).rev().collect::>() + } + + pub(crate) fn phase_transition(to: Phase) { + let from = Self::current_phase(); + use sp_std::mem::discriminant; + if discriminant(&from) != discriminant(&to) { + log!(debug, "transitioning phase from {:?} to {:?}", from, to); + Self::deposit_event(Event::PhaseTransitioned { from, to }); + } else { + log!(trace, "transitioning phase from {:?} to {:?}", from, to); + } + >::put(to); + } + + /// Perform all the basic checks that are independent of the snapshot. 
To be more specific, + /// these are all the checks that you can do without the need to read the massive blob of the + /// actual snapshot. This function only contains a handful of storage reads, with bounded size. + /// + /// A sneaky detail is that this does check the `DesiredTargets` aspect of the snapshot, but + /// neither of the large storage items. + /// + /// Moreover, we do optionally check the fingerprint of the snapshot, if provided. + /// + /// These complement a feasibility-check, which is exactly the opposite: snapshot-dependent + /// checks. + pub(crate) fn snapshot_independent_checks( + paged_solution: &PagedRawSolution, + maybe_snapshot_fingerprint: Option, + ) -> Result<(), CommonError> { + // Note that the order of these checks are critical for the correctness and performance of + // `restore_or_compute_then_maybe_submit`. We want to make sure that we always check round + // first, so that if it has a wrong round, we can detect and delete it from the cache right + // from the get go. + + // ensure round is current + ensure!(Self::round() == paged_solution.round, CommonError::WrongRound); + + // ensure score is being improved, if the claim is even correct. + ensure!( + ::ensure_claimed_score_improves(paged_solution.score), + CommonError::WeakSubmission, + ); + + // ensure solution pages are no more than the snapshot + ensure!( + paged_solution.solution_pages.len().saturated_into::() <= T::Pages::get(), + CommonError::WrongPageCount + ); + + // finally, check the winner count being correct. + if let Some(desired_targets) = Snapshot::::desired_targets() { + ensure!( + desired_targets == paged_solution.winner_count_single_page_target_snapshot() as u32, + CommonError::WrongWinnerCount + ) + } + + // check the snapshot fingerprint, if asked for. 
+ ensure!( + maybe_snapshot_fingerprint + .map_or(true, |snapshot_fingerprint| Snapshot::::fingerprint() == + snapshot_fingerprint), + CommonError::WrongFingerprint + ); + + Ok(()) + } + + /// Creates the target snapshot. + pub(crate) fn create_targets_snapshot() -> Result<(), ElectionError> { + // if requested, get the targets as well. + Snapshot::::set_desired_targets( + T::DataProvider::desired_targets().map_err(ElectionError::DataProvider)?, + ); + + let count = T::TargetSnapshotPerBlock::get(); + let bounds = DataProviderBounds { count: Some(count.into()), size: None }; + let targets: BoundedVec<_, T::TargetSnapshotPerBlock> = + T::DataProvider::electable_targets(bounds, 0) + .and_then(|v| v.try_into().map_err(|_| "try-into failed")) + .map_err(ElectionError::DataProvider)?; + + let count = targets.len() as u32; + log!(debug, "created target snapshot with {} targets.", count); + Snapshot::::set_targets(targets); + + Ok(()) + } + + /// Creates the voter snapshot. + pub(crate) fn create_voters_snapshot_paged( + remaining: PageIndex, + ) -> Result<(), ElectionError> { + let count = T::VoterSnapshotPerBlock::get(); + let bounds = DataProviderBounds { count: Some(count.into()), size: None }; + let voters: BoundedVec<_, T::VoterSnapshotPerBlock> = + T::DataProvider::electing_voters(bounds, remaining) + .and_then(|v| v.try_into().map_err(|_| "try-into failed")) + .map_err(ElectionError::DataProvider)?; + + let count = voters.len() as u32; + Snapshot::::set_voters(remaining, voters); + log!(debug, "created voter snapshot with {} voters, {} remaining.", count, remaining); + + Ok(()) + } + + /// Perform the tasks to be done after a new `elect` has been triggered: + /// + /// 1. Increment round. + /// 2. Change phase to [`Phase::Off`] + /// 3. Clear all snapshot data. + pub(crate) fn rotate_round() { + // Inc round. + >::mutate(|r| *r += 1); + + // Phase is off now. + Self::phase_transition(Phase::Off); + + // Kill everything in the verifier. 
+ T::Verifier::kill(); + + // Kill the snapshot. + Snapshot::::kill(); + } + + /// Call fallback for the given page. + /// + /// This uses the [`ElectionProvider::bother`] to check if the fallback is actually going to do + /// anything. If so, it will re-collect the associated snapshot page and do the fallback. Else, + /// it will early return without touching the snapshot. + fn fallback_for_page(page: PageIndex) -> Result, ElectionError> { + use frame_election_provider_support::InstantElectionProvider; + let (voters, targets, desired_targets) = if T::Fallback::bother() { + ( + Snapshot::::voters(page).ok_or(ElectionError::Other("snapshot!"))?, + Snapshot::::targets().ok_or(ElectionError::Other("snapshot!"))?, + Snapshot::::desired_targets().ok_or(ElectionError::Other("snapshot!"))?, + ) + } else { + (Default::default(), Default::default(), Default::default()) + }; + T::Fallback::instant_elect(voters.into_inner(), targets.into_inner(), desired_targets) + .map_err(|fe| ElectionError::Fallback(fe)) + } + + /// A reasonable next election block number. + pub fn average_election_duration() -> u32 { + let signed: u32 = T::SignedPhase::get().saturated_into(); + let unsigned: u32 = T::UnsignedPhase::get().saturated_into(); + let signed_validation: u32 = T::SignedValidationPhase::get().saturated_into(); + let snapshot = T::Pages::get(); + + // we don't count the export. + let _export = T::Pages::get(); + + snapshot + signed + signed_validation + unsigned + } + + #[cfg(any(test, feature = "runtime-benchmarks", feature = "try-runtime"))] + pub(crate) fn do_try_state(_: BlockNumberFor) -> Result<(), &'static str> { + Snapshot::::sanity_check() + } +} + +#[allow(unused)] +#[cfg(any(feature = "runtime-benchmarks", test))] +// helper code for testing and benchmarking +impl Pallet +where + T: Config + crate::signed::Config + crate::unsigned::Config + crate::verifier::Config, + BlockNumberFor: From, +{ + /// Progress blocks until the criteria is met. 
+ pub(crate) fn roll_until_matches(criteria: impl FnOnce() -> bool + Copy) { + loop { + Self::roll_next(true, false); + if criteria() { + break + } + } + } + + /// Progress blocks until one block before the criteria is met. + pub(crate) fn roll_until_before_matches(criteria: impl FnOnce() -> bool + Copy) { + use frame_support::storage::TransactionOutcome; + loop { + let should_break = frame_support::storage::with_transaction( + || -> TransactionOutcome> { + Pallet::::roll_next(true, false); + if criteria() { + TransactionOutcome::Rollback(Ok(true)) + } else { + TransactionOutcome::Commit(Ok(false)) + } + }, + ) + .unwrap(); + + if should_break { + break + } + } + } + + pub(crate) fn roll_to_signed_and_mine_full_solution() -> PagedRawSolution { + use unsigned::miner::OffchainWorkerMiner; + Self::roll_until_matches(|| Self::current_phase().is_signed()); + // ensure snapshot is full. + crate::Snapshot::::ensure_full_snapshot().expect("Snapshot is not full"); + OffchainWorkerMiner::::mine_solution(T::Pages::get(), false) + .expect("mine_solution failed") + } + + pub(crate) fn submit_full_solution( + PagedRawSolution { score, solution_pages, .. 
}: PagedRawSolution, + ) -> DispatchResultWithPostInfo { + use frame_system::RawOrigin; + use sp_std::boxed::Box; + use types::Pagify; + + // register alice + let alice = crate::Pallet::::funded_account("alice", 0); + signed::Pallet::::register(RawOrigin::Signed(alice.clone()).into(), score)?; + + // submit pages + for (index, page) in solution_pages.pagify(T::Pages::get()) { + signed::Pallet::::submit_page( + RawOrigin::Signed(alice.clone()).into(), + index, + Some(Box::new(page.clone())), + ) + .inspect_err(|&e| { + log!(error, "submit_page {:?} failed: {:?}", page, e); + })?; + } + + Ok(().into()) + } + + pub(crate) fn roll_to_signed_and_submit_full_solution() -> DispatchResultWithPostInfo { + Self::submit_full_solution(Self::roll_to_signed_and_mine_full_solution()) + } + + fn funded_account(seed: &'static str, index: u32) -> T::AccountId { + use frame_benchmarking::whitelist; + use frame_support::traits::fungible::{Inspect, Mutate}; + let who: T::AccountId = frame_benchmarking::account(seed, index, 777); + whitelist!(who); + let balance = T::Currency::minimum_balance() * 1_0000_0000u32.into(); + T::Currency::mint_into(&who, balance).unwrap(); + who + } + + /// Roll all pallets forward, for the given number of blocks. + pub(crate) fn roll_to(n: BlockNumberFor, with_signed: bool, try_state: bool) { + let now = frame_system::Pallet::::block_number(); + assert!(n > now, "cannot roll to current or past block"); + let one: BlockNumberFor = 1u32.into(); + let mut i = now + one; + while i <= n { + frame_system::Pallet::::set_block_number(i); + + Pallet::::on_initialize(i); + verifier::Pallet::::on_initialize(i); + unsigned::Pallet::::on_initialize(i); + + if with_signed { + signed::Pallet::::on_initialize(i); + } + + // invariants must hold at the end of each block. 
+ if try_state { + Pallet::::do_try_state(i).unwrap(); + verifier::Pallet::::do_try_state(i).unwrap(); + unsigned::Pallet::::do_try_state(i).unwrap(); + signed::Pallet::::do_try_state(i).unwrap(); + } + + i += one; + } + } + + /// Roll to next block. + pub(crate) fn roll_next(with_signed: bool, try_state: bool) { + Self::roll_to( + frame_system::Pallet::::block_number() + 1u32.into(), + with_signed, + try_state, + ); + } +} + +impl ElectionProvider for Pallet { + type AccountId = T::AccountId; + type BlockNumber = BlockNumberFor; + type Error = ElectionError; + type DataProvider = T::DataProvider; + type Pages = T::Pages; + type MaxWinnersPerPage = ::MaxWinnersPerPage; + type MaxBackersPerWinner = ::MaxBackersPerWinner; + + fn elect(remaining: PageIndex) -> Result, Self::Error> { + match Self::status() { + // we allow `elect` to be called as long as we have received a start signal. + Ok(_) => (), + Err(_) => return Err(ElectionError::NotOngoing), + } + + let result = T::Verifier::get_queued_solution_page(remaining) + .ok_or(ElectionError::SupportPageNotAvailable) + .or_else(|err: ElectionError| { + log!( + warn, + "primary election for page {} failed due to: {:?}, trying fallback", + remaining, + err, + ); + Self::fallback_for_page(remaining) + }) + .map_err(|err| { + // if any pages returns an error, we go into the emergency phase and don't do + // anything else anymore. This will prevent any new submissions to signed and + // unsigned pallet, and thus the verifier will also be almost stuck, except for the + // submission of emergency solutions. + log!(warn, "primary and fallback ({:?}) failed for page {:?}", err, remaining); + err + }) + .map(|supports| { + // convert to bounded + supports.into() + }); + + // if fallback has possibly put us into the emergency phase, don't do anything else. 
+ if CurrentPhase::::get().is_emergency() && result.is_err() { + log!(error, "Emergency phase triggered, halting the election."); + } else { + if remaining.is_zero() { + log!(info, "receiving last call to elect(0), rotating round"); + Self::rotate_round() + } else { + Self::phase_transition(Phase::Export(remaining)) + } + } + + result + } + + fn start() -> Result<(), Self::Error> { + match Self::status() { + Err(()) => (), + Ok(_) => return Err(ElectionError::Ongoing), + } + + Self::phase_transition(Phase::::start_phase()); + Ok(()) + } + + fn duration() -> Self::BlockNumber { + Self::average_election_duration().into() + } + + fn status() -> Result { + match >::get() { + // we're not doing anything. + Phase::Off => Err(()), + + // we're doing sth but not read. + Phase::Signed(_) | + Phase::SignedValidation(_) | + Phase::Unsigned(_) | + Phase::Snapshot(_) | + Phase::Emergency => Ok(false), + + // we're ready + Phase::Done | Phase::Export(_) => Ok(true), + } + } + + #[cfg(feature = "runtime-benchmarks")] + fn asap() { + // prepare our snapshot so we can "hopefully" run a fallback. 
+ Self::create_targets_snapshot().unwrap(); + for p in (Self::lsp()..=Self::msp()).rev() { + Self::create_voters_snapshot_paged(p).unwrap() + } + } +} + +#[cfg(test)] +mod phase_rotation { + use super::{Event, *}; + use crate::{mock::*, Phase}; + use frame_election_provider_support::ElectionProvider; + use frame_support::traits::Hooks; + + #[test] + fn single_page() { + ExtBuilder::full() + .pages(1) + .election_start(13) + .fallback_mode(FallbackModes::Onchain) + .build_and_execute(|| { + // 0 -------- 14 15 --------- 20 ------------- 25 ---------- 30 + // | | | | | + // Snapshot Signed SignedValidation Unsigned elect() + + assert_eq!(System::block_number(), 0); + assert_eq!(MultiBlock::current_phase(), Phase::Off); + assert_ok!(Snapshot::::ensure_snapshot(false, 1)); + assert_eq!(MultiBlock::round(), 0); + + roll_to(4); + assert_eq!(MultiBlock::current_phase(), Phase::Off); + assert_eq!(MultiBlock::round(), 0); + + roll_to(13); + assert_eq!(MultiBlock::current_phase(), Phase::Snapshot(1)); + assert_ok!(Snapshot::::ensure_snapshot(false, 3)); + + roll_to(14); + assert_eq!(MultiBlock::current_phase(), Phase::Snapshot(0)); + assert_ok!(Snapshot::::ensure_snapshot(true, 0)); + + roll_to(15); + assert_eq!(MultiBlock::current_phase(), Phase::Signed(SignedPhase::get() - 1)); + assert_ok!(Snapshot::::ensure_snapshot(true, 1)); + assert_eq!(MultiBlock::round(), 0); + + assert_eq!( + multi_block_events_since_last_call(), + vec![ + Event::PhaseTransitioned { from: Phase::Off, to: Phase::Snapshot(1) }, + Event::PhaseTransitioned { + from: Phase::Snapshot(0), + to: Phase::Signed(SignedPhase::get() - 1) + } + ] + ); + + roll_to(19); + assert_eq!(MultiBlock::current_phase(), Phase::Signed(0)); + assert_eq!(MultiBlock::round(), 0); + + roll_to(20); + assert_eq!( + MultiBlock::current_phase(), + Phase::SignedValidation(SignedValidationPhase::get() - 1) + ); + assert_ok!(Snapshot::::ensure_snapshot(true, 1)); + assert_eq!(MultiBlock::round(), 0); + + assert_eq!( + 
multi_block_events_since_last_call(), + vec![Event::PhaseTransitioned { + from: Phase::Signed(0), + to: Phase::SignedValidation(SignedValidationPhase::get() - 1) + }], + ); + + roll_to(24); + assert_eq!(MultiBlock::current_phase(), Phase::SignedValidation(0)); + assert_ok!(Snapshot::::ensure_snapshot(true, 1)); + assert_eq!(MultiBlock::round(), 0); + + roll_to(25); + assert_eq!(MultiBlock::current_phase(), Phase::Unsigned(UnsignedPhase::get() - 1)); + assert_eq!( + multi_block_events_since_last_call(), + vec![Event::PhaseTransitioned { + from: Phase::SignedValidation(0), + to: Phase::Unsigned(UnsignedPhase::get() - 1) + }], + ); + + roll_to(29); + assert_eq!(MultiBlock::current_phase(), Phase::Unsigned(0)); + + // We stay in done otherwise + roll_to(30); + assert!(MultiBlock::current_phase().is_done()); + + // We stay in done otherwise + roll_to(31); + assert!(MultiBlock::current_phase().is_done()); + + // We close when upstream tells us to elect. + roll_to(32); + assert_eq!(MultiBlock::current_phase(), Phase::Done); + assert_ok!(Snapshot::::ensure_snapshot(true, 1)); + + MultiBlock::elect(0).unwrap(); + + assert!(MultiBlock::current_phase().is_off()); + assert_ok!(Snapshot::::ensure_snapshot(false, 1)); + assert_eq!(MultiBlock::round(), 1); + + roll_to(42); + assert_eq!(MultiBlock::current_phase(), Phase::Off); + }) + } + + #[test] + fn multi_page_2() { + ExtBuilder::full() + .pages(2) + .fallback_mode(FallbackModes::Onchain) + .election_start(12) + .build_and_execute(|| { + // 0 -------13 14 15 ------- 20 ---- 25 ------- 30 + // | | | | | + // Snapshot Signed SigValid Unsigned Elect + + assert_eq!(System::block_number(), 0); + assert_eq!(MultiBlock::current_phase(), Phase::Off); + assert_ok!(Snapshot::::ensure_snapshot(false, 2)); + assert_eq!(MultiBlock::round(), 0); + + roll_to(4); + assert_eq!(MultiBlock::current_phase(), Phase::Off); + assert_eq!(MultiBlock::round(), 0); + + roll_to(11); + assert_eq!(MultiBlock::current_phase(), Phase::Off); + 
assert_eq!(MultiBlock::round(), 0); + + roll_to(12); + assert_eq!(MultiBlock::current_phase(), Phase::Snapshot(2)); + assert_ok!(Snapshot::::ensure_snapshot(false, 2)); + + roll_to(13); + assert_eq!(MultiBlock::current_phase(), Phase::Snapshot(1)); + assert_ok!(Snapshot::::ensure_snapshot(true, 0)); + + roll_to(14); + assert_eq!(MultiBlock::current_phase(), Phase::Snapshot(0)); + assert_ok!(Snapshot::::ensure_snapshot(true, 1)); + + roll_to(15); + assert_ok!(Snapshot::::ensure_snapshot(true, 2)); + assert_eq!(MultiBlock::round(), 0); + assert_eq!(MultiBlock::current_phase(), Phase::Signed(SignedPhase::get() - 1)); + + assert_eq!( + multi_block_events_since_last_call(), + vec![ + Event::PhaseTransitioned { from: Phase::Off, to: Phase::Snapshot(2) }, + Event::PhaseTransitioned { + from: Phase::Snapshot(0), + to: Phase::Signed(SignedPhase::get() - 1) + } + ] + ); + + roll_to(19); + assert_eq!(MultiBlock::current_phase(), Phase::Signed(0)); + assert_ok!(Snapshot::::ensure_snapshot(true, 2)); + assert_eq!(MultiBlock::round(), 0); + + roll_to(20); + assert_ok!(Snapshot::::ensure_snapshot(true, 2)); + assert_eq!(MultiBlock::round(), 0); + assert_eq!( + MultiBlock::current_phase(), + Phase::SignedValidation(SignedValidationPhase::get() - 1) + ); + + assert_eq!( + multi_block_events_since_last_call(), + vec![Event::PhaseTransitioned { + from: Phase::Signed(0), + to: Phase::SignedValidation(SignedValidationPhase::get() - 1) + }], + ); + + roll_to(24); + assert_eq!(MultiBlock::current_phase(), Phase::SignedValidation(0)); + assert_ok!(Snapshot::::ensure_snapshot(true, 2)); + assert_eq!(MultiBlock::round(), 0); + + roll_to(25); + assert_eq!(MultiBlock::current_phase(), Phase::Unsigned(UnsignedPhase::get() - 1)); + assert_ok!(Snapshot::::ensure_snapshot(true, 2)); + assert_eq!(MultiBlock::round(), 0); + + assert_eq!( + multi_block_events_since_last_call(), + vec![Event::PhaseTransitioned { + from: Phase::SignedValidation(0), + to: Phase::Unsigned(UnsignedPhase::get() - 1) + }], 
+ ); + + roll_to(29); + assert_eq!(MultiBlock::current_phase(), Phase::Unsigned(0)); + assert_ok!(Snapshot::::ensure_snapshot(true, 2)); + + roll_to(30); + assert_eq!(MultiBlock::current_phase(), Phase::Done); + assert_ok!(Snapshot::::ensure_snapshot(true, 2)); + + // We close when upstream tells us to elect. + roll_to(32); + assert_eq!(MultiBlock::current_phase(), Phase::Done); + + // and even this one's coming from the fallback. + MultiBlock::elect(0).unwrap(); + assert!(MultiBlock::current_phase().is_off()); + + // all snapshots are gone. + assert_ok!(Snapshot::::ensure_snapshot(false, 2)); + assert_eq!(MultiBlock::round(), 1); + }) + } + + #[test] + fn multi_page_3() { + ExtBuilder::full() + .pages(3) + .fallback_mode(FallbackModes::Onchain) + .build_and_execute(|| { + // 0 ------- 12 13 14 15 ----------- 20 ---------25 ------- 30 + // | | | | | + // Snapshot Signed SignedValidation Unsigned Elect + + assert_eq!(System::block_number(), 0); + assert!(MultiBlock::current_phase().is_off()); + assert_ok!(Snapshot::::ensure_snapshot(false, 3)); + assert_eq!(MultiBlock::round(), 0); + + roll_to(10); + assert!(MultiBlock::current_phase().is_off()); + assert_eq!(MultiBlock::round(), 0); + + roll_to(11); + assert_eq!(MultiBlock::current_phase(), Phase::Snapshot(3)); + // no snapshot is take yet, we start at the next block + assert_ok!(Snapshot::::ensure_snapshot(false, 3)); + + roll_to(12); + assert_eq!(MultiBlock::current_phase(), Phase::Snapshot(2)); + assert_ok!(Snapshot::::ensure_snapshot(true, 0)); + + roll_to(13); + assert_eq!(MultiBlock::current_phase(), Phase::Snapshot(1)); + assert_ok!(Snapshot::::ensure_snapshot(true, 1)); + + roll_to(14); + assert_eq!(MultiBlock::current_phase(), Phase::Snapshot(0)); + assert_ok!(Snapshot::::ensure_snapshot(true, 2)); + + roll_to(15); + assert_ok!(Snapshot::::ensure_snapshot(true, Pages::get())); + assert_eq!(MultiBlock::current_phase(), Phase::Signed(4)); + assert_eq!( + multi_block_events_since_last_call(), + vec![ + 
Event::PhaseTransitioned { from: Phase::Off, to: Phase::Snapshot(3) }, + Event::PhaseTransitioned { + from: Phase::Snapshot(0), + to: Phase::Signed(SignedPhase::get() - 1) + } + ] + ); + assert_eq!(MultiBlock::round(), 0); + + roll_to(19); + assert_eq!(MultiBlock::current_phase(), Phase::Signed(0)); + assert_eq!(MultiBlock::round(), 0); + + roll_to(20); + assert_eq!( + MultiBlock::current_phase(), + Phase::SignedValidation(SignedValidationPhase::get() - 1) + ); + assert_eq!( + multi_block_events_since_last_call(), + vec![Event::PhaseTransitioned { + from: Phase::Signed(0), + to: Phase::SignedValidation(SignedValidationPhase::get() - 1) + }] + ); + + roll_to(24); + assert_eq!(MultiBlock::current_phase(), Phase::SignedValidation(0)); + assert_eq!(MultiBlock::round(), 0); + + roll_to(25); + assert_eq!(MultiBlock::current_phase(), Phase::Unsigned(UnsignedPhase::get() - 1)); + assert_eq!( + multi_block_events_since_last_call(), + vec![Event::PhaseTransitioned { + from: Phase::SignedValidation(0), + to: Phase::Unsigned(UnsignedPhase::get() - 1) + }] + ); + + roll_to(29); + assert_eq!(MultiBlock::current_phase(), Phase::Unsigned(0)); + + roll_to(30); + assert_eq!(MultiBlock::current_phase(), Phase::Done); + + // We close when upstream tells us to elect. + roll_to(32); + assert_eq!(MultiBlock::current_phase(), Phase::Done); + + MultiBlock::elect(0).unwrap(); + assert!(MultiBlock::current_phase().is_off()); + + // all snapshots are gone. 
+ assert_none_snapshot(); + assert_eq!(MultiBlock::round(), 1); + }) + } + + #[test] + fn no_unsigned_phase() { + ExtBuilder::full() + .pages(3) + .unsigned_phase(0) + .election_start(16) + .fallback_mode(FallbackModes::Onchain) + .build_and_execute(|| { + // 0 --------------------- 17 ------ 20 ---------25 ------- 30 + // | | | | | + // Snapshot Signed SignedValidation Elect + + assert_eq!(System::block_number(), 0); + assert_eq!(MultiBlock::current_phase(), Phase::Off); + assert_none_snapshot(); + assert_eq!(MultiBlock::round(), 0); + + roll_to(4); + assert_eq!(MultiBlock::current_phase(), Phase::Off); + assert_eq!(MultiBlock::round(), 0); + + roll_to(16); + assert_eq!(MultiBlock::current_phase(), Phase::Snapshot(3)); + + roll_to(17); + assert_eq!(MultiBlock::current_phase(), Phase::Snapshot(2)); + + roll_to(18); + assert_eq!(MultiBlock::current_phase(), Phase::Snapshot(1)); + + roll_to(19); + assert_eq!(MultiBlock::current_phase(), Phase::Snapshot(0)); + + roll_to(20); + assert_eq!(MultiBlock::current_phase(), Phase::Signed(SignedPhase::get() - 1)); + + assert_full_snapshot(); + assert_eq!(MultiBlock::round(), 0); + + roll_to(25); + assert_eq!( + MultiBlock::current_phase(), + Phase::SignedValidation(SignedValidationPhase::get() - 1) + ); + + assert_eq!( + multi_block_events_since_last_call(), + vec![ + Event::PhaseTransitioned { from: Phase::Off, to: Phase::Snapshot(3) }, + Event::PhaseTransitioned { + from: Phase::Snapshot(0), + to: Phase::Signed(SignedPhase::get() - 1) + }, + Event::PhaseTransitioned { + from: Phase::Signed(0), + to: Phase::SignedValidation(SignedValidationPhase::get() - 1) + }, + ] + ); + + // last block of signed validation + roll_to(29); + assert_eq!(MultiBlock::current_phase(), Phase::SignedValidation(0)); + + // we are done now + roll_to(30); + assert_eq!(MultiBlock::current_phase(), Phase::Done); + + roll_to(31); + assert_eq!(MultiBlock::current_phase(), Phase::Done); + + MultiBlock::elect(0).unwrap(); + 
assert!(MultiBlock::current_phase().is_off()); + + // all snapshots are gone. + assert_none_snapshot(); + assert_eq!(MultiBlock::round(), 1); + assert_ok!(signed::Submissions::::ensure_killed(0)); + verifier::QueuedSolution::::assert_killed(); + }) + } + + #[test] + fn no_signed_phase() { + ExtBuilder::full() + .pages(3) + .signed_phase(0, 0) + .election_start(21) + .fallback_mode(FallbackModes::Onchain) + .build_and_execute(|| { + // 0 ------------------------- 22 ------ 25 ------- 30 + // | | | + // Snapshot Unsigned Elect + + assert_eq!(System::block_number(), 0); + assert_eq!(MultiBlock::current_phase(), Phase::Off); + assert_none_snapshot(); + assert_eq!(MultiBlock::round(), 0); + + roll_to(20); + assert_eq!(MultiBlock::current_phase(), Phase::Off); + assert_eq!(MultiBlock::round(), 0); + + roll_to(21); + assert_eq!(MultiBlock::current_phase(), Phase::Snapshot(3)); + roll_to(22); + assert_eq!(MultiBlock::current_phase(), Phase::Snapshot(2)); + roll_to(23); + assert_eq!(MultiBlock::current_phase(), Phase::Snapshot(1)); + roll_to(24); + assert_eq!(MultiBlock::current_phase(), Phase::Snapshot(0)); + + roll_to(25); + assert_eq!(MultiBlock::current_phase(), Phase::Unsigned(UnsignedPhase::get() - 1)); + assert_full_snapshot(); + assert_eq!(MultiBlock::round(), 0); + + assert_eq!( + multi_block_events(), + vec![ + Event::PhaseTransitioned { from: Phase::Off, to: Phase::Snapshot(3) }, + Event::PhaseTransitioned { + from: Phase::Snapshot(0), + to: Phase::Unsigned(UnsignedPhase::get() - 1) + }, + ] + ); + + roll_to(29); + assert_eq!(MultiBlock::current_phase(), Phase::Unsigned(0)); + + roll_to(30); + assert_eq!(MultiBlock::current_phase(), Phase::Done); + roll_to(31); + assert_eq!(MultiBlock::current_phase(), Phase::Done); + + // eventually the call to elect comes, and we exit done phase. + MultiBlock::elect(0).unwrap(); + assert!(MultiBlock::current_phase().is_off()); + + // all snapshots are gone. 
+ assert_none_snapshot(); + assert_eq!(MultiBlock::round(), 1); + assert_ok!(signed::Submissions::::ensure_killed(0)); + verifier::QueuedSolution::::assert_killed(); + }) + } + + #[test] + fn no_signed_and_unsigned_phase() { + ExtBuilder::full() + .pages(3) + .signed_phase(0, 0) + .unsigned_phase(0) + .election_start(10) + .fallback_mode(FallbackModes::Onchain) + .build_and_execute(|| { + assert_eq!(System::block_number(), 0); + assert_eq!(MultiBlock::current_phase(), Phase::Off); + assert_none_snapshot(); + assert_eq!(MultiBlock::round(), 0); + + roll_to(10); + assert_eq!(MultiBlock::current_phase(), Phase::Snapshot(3)); + assert_eq!(MultiBlock::round(), 0); + + roll_to(11); + assert_eq!(MultiBlock::current_phase(), Phase::Snapshot(2)); + roll_to(12); + assert_eq!(MultiBlock::current_phase(), Phase::Snapshot(1)); + roll_to(13); + assert_eq!(MultiBlock::current_phase(), Phase::Snapshot(0)); + + // And we are done already + roll_to(14); + assert_eq!(MultiBlock::current_phase(), Phase::Done); + }); + } + + #[test] + #[should_panic( + expected = "signed validation phase should be at least as long as the number of pages" + )] + fn incorrect_signed_validation_phase() { + ExtBuilder::full() + .pages(3) + .signed_validation_phase(2) + .build_and_execute(|| >::integrity_test()) + } + + #[test] + fn are_we_done_back_to_signed() { + ExtBuilder::full() + .are_we_done(AreWeDoneModes::BackToSigned) + .build_and_execute(|| { + // roll to unsigned + roll_to_last_unsigned(); + + assert_eq!(MultiBlock::round(), 0); + assert_eq!(MultiBlock::current_phase(), Phase::Unsigned(0)); + assert_eq!( + multi_block_events_since_last_call(), + vec![ + Event::PhaseTransitioned { from: Phase::Off, to: Phase::Snapshot(3) }, + Event::PhaseTransitioned { from: Phase::Snapshot(0), to: Phase::Signed(4) }, + Event::PhaseTransitioned { + from: Phase::Signed(0), + to: Phase::SignedValidation(4) + }, + Event::PhaseTransitioned { + from: Phase::SignedValidation(0), + to: Phase::Unsigned(4) + } + ] + ); + 
+ roll_next(); + // we are back to signed phase + assert_eq!(MultiBlock::current_phase(), Phase::Signed(SignedPhase::get() - 1)); + // round is still the same + assert_eq!(MultiBlock::round(), 0); + + // we proceed to normally again: + roll_next(); + assert_eq!(MultiBlock::current_phase(), Phase::Signed(SignedPhase::get() - 2)); + + roll_next(); + assert_eq!(MultiBlock::current_phase(), Phase::Signed(SignedPhase::get() - 3)); + }); + } +} + +#[cfg(test)] +mod election_provider { + use super::*; + use crate::{mock::*, unsigned::miner::OffchainWorkerMiner, verifier::Verifier, Phase}; + use frame_election_provider_support::{BoundedSupport, BoundedSupports, ElectionProvider}; + use frame_support::{ + assert_storage_noop, testing_prelude::bounded_vec, unsigned::ValidateUnsigned, + }; + + // This is probably the most important test of all, a basic, correct scenario. This test should + // be studied in detail, and all of the branches of how it can go wrong or diverge from the + // basic scenario assessed. + #[test] + fn multi_page_elect_simple_works() { + ExtBuilder::full().build_and_execute(|| { + roll_to_signed_open(); + assert!(MultiBlock::current_phase().is_signed()); + + // load a solution into the verifier + let paged = OffchainWorkerMiner::::mine_solution(Pages::get(), false).unwrap(); + let score = paged.score; + + // now let's submit this one by one, into the signed phase. + load_signed_for_verification(99, paged); + + // now the solution should start being verified. 
+ roll_to_signed_validation_open(); + + assert_eq!( + multi_block_events(), + vec![ + Event::PhaseTransitioned { from: Phase::Off, to: Phase::Snapshot(3) }, + Event::PhaseTransitioned { + from: Phase::Snapshot(0), + to: Phase::Signed(SignedPhase::get() - 1) + }, + Event::PhaseTransitioned { + from: Phase::Signed(0), + to: Phase::SignedValidation(SignedValidationPhase::get() - 1) + } + ] + ); + assert_eq!(verifier_events(), vec![]); + + // there is no queued solution prior to the last page of the solution getting verified + assert_eq!(::Verifier::queued_score(), None); + + // proceed until it is fully verified. + roll_next(); + assert_eq!(verifier_events(), vec![verifier::Event::Verified(2, 2)]); + + roll_next(); + assert_eq!( + verifier_events(), + vec![verifier::Event::Verified(2, 2), verifier::Event::Verified(1, 2)] + ); + + roll_next(); + assert_eq!( + verifier_events(), + vec![ + verifier::Event::Verified(2, 2), + verifier::Event::Verified(1, 2), + verifier::Event::Verified(0, 2), + verifier::Event::Queued(score, None), + ] + ); + + // there is now a queued solution. + assert_eq!(::Verifier::queued_score(), Some(score)); + + // now let's go to unsigned phase, but we don't expect anything to happen there since we + // don't run OCWs. 
+ roll_to_unsigned_open(); + + // pre-elect state + assert!(MultiBlock::current_phase().is_unsigned_opened_now()); + assert_eq!(MultiBlock::round(), 0); + assert_full_snapshot(); + + // call elect for each page + let _paged_solution = (MultiBlock::lsp()..MultiBlock::msp()) + .rev() // 2, 1, 0 + .map(|page| { + MultiBlock::elect(page as PageIndex).unwrap(); + if page == 0 { + assert!(MultiBlock::current_phase().is_off()) + } else { + assert!(MultiBlock::current_phase().is_export()) + } + }) + .collect::>(); + + // after the last elect, verifier is cleared, + verifier::QueuedSolution::::assert_killed(); + // the phase is off, + assert_eq!(MultiBlock::current_phase(), Phase::Off); + // the round is incremented, + assert_eq!(Round::::get(), 1); + // and the snapshot is cleared, + assert_storage_noop!(Snapshot::::kill()); + // signed pallet is clean. + // NOTE: in the future, if and when we add lazy cleanup to the signed pallet, this + // assertion might break. + assert_ok!(signed::Submissions::::ensure_killed(0)); + }); + } + + #[test] + fn multi_page_elect_fast_track() { + ExtBuilder::full().build_and_execute(|| { + roll_to_signed_open(); + let round = MultiBlock::round(); + assert!(MultiBlock::current_phase().is_signed()); + + // load a solution into the verifier + let paged = OffchainWorkerMiner::::mine_solution(Pages::get(), false).unwrap(); + let score = paged.score; + load_signed_for_verification_and_start(99, paged, 0); + + // there is no queued solution prior to the last page of the solution getting verified + assert_eq!(::Verifier::queued_score(), None); + + // roll to the block it is finalized + roll_next(); + roll_next(); + roll_next(); + assert_eq!( + verifier_events(), + vec![ + verifier::Event::Verified(2, 2), + verifier::Event::Verified(1, 2), + verifier::Event::Verified(0, 2), + verifier::Event::Queued(score, None), + ] + ); + + // there is now a queued solution. 
+ assert_eq!(::Verifier::queued_score(), Some(score)); + + // not much impact, just for the sane-ness of the test. + roll_to_unsigned_open(); + + // pre-elect state: + assert!(MultiBlock::current_phase().is_unsigned_opened_now()); + assert_eq!(Round::::get(), 0); + assert_full_snapshot(); + + // there are 3 pages (indexes 2..=0), but we short circuit by just calling 0. + let _solution = crate::Pallet::::elect(0).unwrap(); + + // round is incremented. + assert_eq!(MultiBlock::round(), round + 1); + // after elect(0) is called, verifier is cleared, + verifier::QueuedSolution::::assert_killed(); + // the phase is off, + assert_eq!(MultiBlock::current_phase(), Phase::Off); + // the round is incremented, + assert_eq!(Round::::get(), 1); + // the snapshot is cleared, + assert_none_snapshot(); + // and signed pallet is clean. + assert_ok!(signed::Submissions::::ensure_killed(round)); + }); + } + + #[test] + fn elect_does_not_finish_without_call_of_page_0() { + ExtBuilder::full().build_and_execute(|| { + roll_to_signed_open(); + assert!(MultiBlock::current_phase().is_signed()); + + // load a solution into the verifier + let paged = OffchainWorkerMiner::::mine_solution(Pages::get(), false).unwrap(); + let score = paged.score; + load_signed_for_verification_and_start(99, paged, 0); + + // there is no queued solution prior to the last page of the solution getting verified + assert_eq!(::Verifier::queued_score(), None); + + // roll to the block it is finalized + roll_next(); + roll_next(); + roll_next(); + assert_eq!( + verifier_events(), + vec![ + verifier::Event::Verified(2, 2), + verifier::Event::Verified(1, 2), + verifier::Event::Verified(0, 2), + verifier::Event::Queued(score, None), + ] + ); + + // there is now a queued solution + assert_eq!(::Verifier::queued_score(), Some(score)); + + // not much impact, just for the sane-ness of the test. 
+ roll_to_unsigned_open(); + + // pre-elect state: + assert!(MultiBlock::current_phase().is_unsigned_opened_now()); + assert_eq!(Round::::get(), 0); + assert_full_snapshot(); + + // call elect for page 2 and 1, but NOT 0 + let solutions = (1..=MultiBlock::msp()) + .rev() // 2, 1 + .map(|page| { + crate::Pallet::::elect(page as PageIndex).unwrap(); + assert!(MultiBlock::current_phase().is_export()); + }) + .collect::>(); + assert_eq!(solutions.len(), 2); + + // nothing changes from the prelect state, except phase is now export. + assert!(MultiBlock::current_phase().is_export()); + assert_eq!(Round::::get(), 0); + assert_full_snapshot(); + }); + } + + #[test] + fn skip_unsigned_phase() { + ExtBuilder::full().build_and_execute(|| { + roll_to_signed_open(); + assert!(MultiBlock::current_phase().is_signed()); + let round = MultiBlock::round(); + + // load a solution into the verifier + let paged = OffchainWorkerMiner::::mine_solution(Pages::get(), false).unwrap(); + + load_signed_for_verification_and_start_and_roll_to_verified(99, paged, 0); + + // and right here, in the middle of the signed verification phase, we close the round. + // Everything should work fine. + assert!(matches!(MultiBlock::current_phase(), Phase::SignedValidation(_))); + assert_eq!(Round::::get(), 0); + assert_full_snapshot(); + + // fetch all pages. + let _paged_solution = (MultiBlock::lsp()..MultiBlock::msp()) + .rev() // 2, 1, 0 + .map(|page| { + MultiBlock::elect(page as PageIndex).unwrap(); + if page == 0 { + assert!(MultiBlock::current_phase().is_off()) + } else { + assert!(MultiBlock::current_phase().is_export()) + } + }) + .collect::>(); + + // round is incremented. + assert_eq!(MultiBlock::round(), round + 1); + // after elect(0) is called, verifier is cleared, + verifier::QueuedSolution::::assert_killed(); + // the phase is off, + assert_eq!(MultiBlock::current_phase(), Phase::Off); + // the snapshot is cleared, + assert_storage_noop!(Snapshot::::kill()); + // and signed pallet is clean. 
+ assert_ok!(signed::Submissions::::ensure_killed(round)); + }); + } + + #[test] + fn call_to_elect_should_prevent_any_submission() { + ExtBuilder::full().build_and_execute(|| { + roll_to_signed_open(); + assert!(MultiBlock::current_phase().is_signed()); + + // load a solution into the verifier + let paged = OffchainWorkerMiner::::mine_solution(Pages::get(), false).unwrap(); + load_signed_for_verification_and_start_and_roll_to_verified(99, paged, 0); + + assert!(matches!(MultiBlock::current_phase(), Phase::SignedValidation(_))); + + // fetch one page. + assert!(MultiBlock::elect(MultiBlock::msp()).is_ok()); + + // try submit one signed page: + assert_noop!( + SignedPallet::submit_page(RuntimeOrigin::signed(999), 0, Default::default()), + crate::signed::Error::::PhaseNotSigned, + ); + assert_noop!( + SignedPallet::register(RuntimeOrigin::signed(999), Default::default()), + crate::signed::Error::::PhaseNotSigned, + ); + assert_storage_noop!(assert!(::pre_dispatch( + &unsigned::Call::submit_unsigned { paged_solution: Default::default() } + ) + .is_err())); + }); + } + + #[test] + fn multi_page_onchain_elect_fallback_works() { + ExtBuilder::full().fallback_mode(FallbackModes::Onchain).build_and_execute(|| { + roll_to_signed_open(); + + // same targets, but voters from page 2 (1, 2, 3, 4, see `mock/staking`). 
+ assert_eq!( + MultiBlock::elect(2).unwrap(), + BoundedSupports(bounded_vec![ + (10, BoundedSupport { total: 15, voters: bounded_vec![(1, 10), (4, 5)] }), + ( + 40, + BoundedSupport { + total: 25, + voters: bounded_vec![(2, 10), (3, 10), (4, 5)] + } + ) + ]) + ); + // page 1 of voters + assert_eq!( + MultiBlock::elect(1).unwrap(), + BoundedSupports(bounded_vec![ + (10, BoundedSupport { total: 15, voters: bounded_vec![(5, 5), (8, 10)] }), + ( + 30, + BoundedSupport { + total: 25, + voters: bounded_vec![(5, 5), (6, 10), (7, 10)] + } + ) + ]) + ); + // self votes + assert_eq!( + MultiBlock::elect(0).unwrap(), + BoundedSupports(bounded_vec![ + (30, BoundedSupport { total: 30, voters: bounded_vec![(30, 30)] }), + (40, BoundedSupport { total: 40, voters: bounded_vec![(40, 40)] }) + ]) + ); + + assert_eq!( + multi_block_events(), + vec![ + Event::PhaseTransitioned { + from: Phase::Off, + to: Phase::Snapshot(Pages::get()) + }, + Event::PhaseTransitioned { + from: Phase::Snapshot(0), + to: Phase::Signed(SignedPhase::get() - 1) + }, + Event::PhaseTransitioned { + from: Phase::Signed(SignedPhase::get() - 1), + to: Phase::Export(2) + }, + Event::PhaseTransitioned { from: Phase::Export(1), to: Phase::Off } + ] + ); + assert_eq!(verifier_events(), vec![]); + + // This will set us to emergency phase, because we don't know wtf to do. 
+ assert_eq!(MultiBlock::current_phase(), Phase::Off); + }); + } + + #[test] + fn multi_page_fallback_shortcut_to_msp_works() { + ExtBuilder::full().fallback_mode(FallbackModes::Onchain).build_and_execute(|| { + roll_to_signed_open(); + + // but then we immediately call `elect`, this will work + assert!(MultiBlock::elect(0).is_ok()); + + assert_eq!( + multi_block_events(), + vec![ + Event::PhaseTransitioned { + from: Phase::Off, + to: Phase::Snapshot(Pages::get()) + }, + Event::PhaseTransitioned { + from: Phase::Snapshot(0), + to: Phase::Signed(SignedPhase::get() - 1) + }, + Event::PhaseTransitioned { + from: Phase::Signed(SignedPhase::get() - 1), + to: Phase::Off + } + ] + ); + + // This will set us to the off phase, since fallback saved us. + assert_eq!(MultiBlock::current_phase(), Phase::Off); + }); + } + + #[test] + #[should_panic] + fn continue_fallback_works() { + todo!() + } + + #[test] + #[should_panic] + fn emergency_fallback_works() { + todo!(); + } + + #[test] + fn elect_call_when_not_ongoing() { + ExtBuilder::full().fallback_mode(FallbackModes::Onchain).build_and_execute(|| { + roll_to_snapshot_created(); + assert_eq!(MultiBlock::status(), Ok(false)); + assert!(MultiBlock::elect(0).is_ok()); + }); + ExtBuilder::full().fallback_mode(FallbackModes::Onchain).build_and_execute(|| { + roll_to(10); + assert_eq!(MultiBlock::status(), Err(())); + assert_eq!(MultiBlock::elect(0), Err(ElectionError::NotOngoing)); + }); + } +} + +#[cfg(test)] +mod admin_ops { + use super::*; + use crate::mock::*; + use frame_support::assert_ok; + + #[test] + fn set_solution_emergency_works() { + ExtBuilder::full().build_and_execute(|| { + roll_to_signed_open(); + + // we get a call to elect(0). this will cause emergency, since no fallback is allowed. + assert_eq!( + MultiBlock::elect(0), + Err(ElectionError::Fallback("Emergency phase started.".to_string())) + ); + assert_eq!(MultiBlock::current_phase(), Phase::Emergency); + + // we can now set the solution to emergency. 
+ let (emergency, score) = emergency_solution(); + assert_ok!(MultiBlock::manage( + RuntimeOrigin::root(), + AdminOperation::EmergencySetSolution(Box::new(emergency), score) + )); + + assert_eq!(MultiBlock::current_phase(), Phase::Emergency); + assert_ok!(MultiBlock::elect(0)); + assert_eq!(MultiBlock::current_phase(), Phase::Off); + + assert_eq!( + multi_block_events(), + vec![ + Event::PhaseTransitioned { from: Phase::Off, to: Phase::Snapshot(3) }, + Event::PhaseTransitioned { + from: Phase::Snapshot(0), + to: Phase::Signed(SignedPhase::get() - 1) + }, + Event::PhaseTransitioned { + from: Phase::Signed(SignedPhase::get() - 1), + to: Phase::Emergency + }, + Event::PhaseTransitioned { from: Phase::Emergency, to: Phase::Off } + ] + ); + assert_eq!( + verifier_events(), + vec![verifier::Event::Queued( + ElectionScore { minimal_stake: 55, sum_stake: 130, sum_stake_squared: 8650 }, + None + )] + ); + }) + } + + #[test] + fn trigger_fallback_works() { + ExtBuilder::full() + .fallback_mode(FallbackModes::Emergency) + .build_and_execute(|| { + roll_to_signed_open(); + + // we get a call to elect(0). this will cause emergency, since no fallback is + // allowed. 
+ assert_eq!( + MultiBlock::elect(0), + Err(ElectionError::Fallback("Emergency phase started.".to_string())) + ); + assert_eq!(MultiBlock::current_phase(), Phase::Emergency); + + // we can now set the solution to emergency, assuming fallback is set to onchain + FallbackMode::set(FallbackModes::Onchain); + assert_ok!(MultiBlock::manage( + RuntimeOrigin::root(), + AdminOperation::EmergencyFallback + )); + + assert_eq!(MultiBlock::current_phase(), Phase::Emergency); + assert_ok!(MultiBlock::elect(0)); + assert_eq!(MultiBlock::current_phase(), Phase::Off); + + assert_eq!( + multi_block_events(), + vec![ + Event::PhaseTransitioned { from: Phase::Off, to: Phase::Snapshot(3) }, + Event::PhaseTransitioned { + from: Phase::Snapshot(0), + to: Phase::Signed(SignedPhase::get() - 1) + }, + Event::PhaseTransitioned { + from: Phase::Signed(SignedPhase::get() - 1), + to: Phase::Emergency + }, + Event::PhaseTransitioned { from: Phase::Emergency, to: Phase::Off } + ] + ); + assert_eq!( + verifier_events(), + vec![verifier::Event::Queued( + ElectionScore { minimal_stake: 15, sum_stake: 40, sum_stake_squared: 850 }, + None + )] + ); + }) + } + + #[test] + #[should_panic] + fn force_rotate_round() { + // clears the snapshot and verifier data. + // leaves the signed data as is since we bump the round. 
+ todo!(); + } + + #[test] + fn set_minimum_solution_score() { + ExtBuilder::full().build_and_execute(|| { + assert_eq!(VerifierPallet::minimum_score(), None); + assert_ok!(MultiBlock::manage( + RuntimeOrigin::root(), + AdminOperation::SetMinUntrustedScore(ElectionScore { + minimal_stake: 100, + ..Default::default() + }) + )); + assert_eq!( + VerifierPallet::minimum_score().unwrap(), + ElectionScore { minimal_stake: 100, ..Default::default() } + ); + }); + } +} diff --git a/substrate/frame/election-provider-multi-block/src/mock/mod.rs b/substrate/frame/election-provider-multi-block/src/mock/mod.rs new file mode 100644 index 0000000000000..a03e726d2f8b3 --- /dev/null +++ b/substrate/frame/election-provider-multi-block/src/mock/mod.rs @@ -0,0 +1,762 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The overarching mock crate for all EPMB pallets. 
+ +mod signed; +mod staking; + +use super::*; +use crate::{ + self as multi_block, + signed::{self as signed_pallet, HoldReason}, + unsigned::{ + self as unsigned_pallet, + miner::{MinerConfig, OffchainMinerError, OffchainWorkerMiner}, + }, + verifier::{self as verifier_pallet, AsynchronousVerifier, Status}, +}; +use codec::{Decode, Encode, MaxEncodedLen}; +use frame_election_provider_support::{ + bounds::{ElectionBounds, ElectionBoundsBuilder}, + InstantElectionProvider, NposSolution, SequentialPhragmen, +}; +pub use frame_support::{assert_noop, assert_ok}; +use frame_support::{ + derive_impl, parameter_types, + traits::{fungible::InspectHold, Hooks}, + weights::{constants, Weight}, +}; +use frame_system::EnsureRoot; +use parking_lot::RwLock; +pub use signed::*; +use sp_core::{ + offchain::{ + testing::{PoolState, TestOffchainExt, TestTransactionPoolExt}, + OffchainDbExt, OffchainWorkerExt, TransactionPoolExt, + }, + ConstBool, +}; +use sp_npos_elections::EvaluateSupport; +use sp_runtime::{ + bounded_vec, + traits::{BlakeTwo256, IdentityLookup}, + BuildStorage, PerU16, Perbill, +}; +pub use staking::*; +use std::{sync::Arc, vec}; + +pub type Extrinsic = sp_runtime::testing::TestXt; + +pub type Balance = u64; +pub type AccountId = u64; +pub type BlockNumber = u64; +pub type VoterIndex = u32; +pub type TargetIndex = u16; + +frame_support::construct_runtime!( + pub enum Runtime { + System: frame_system, + Balances: pallet_balances, + MultiBlock: multi_block, + SignedPallet: signed_pallet, + VerifierPallet: verifier_pallet, + UnsignedPallet: unsigned_pallet, + } +); + +frame_election_provider_support::generate_solution_type!( + pub struct TestNposSolution::< + VoterIndex = VoterIndex, + TargetIndex = TargetIndex, + Accuracy = PerU16, + MaxVoters = ConstU32::<2_000> + >(16) +); + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] +impl frame_system::Config for Runtime { + type Hashing = BlakeTwo256; + type AccountId = AccountId; + type Lookup = 
IdentityLookup; + type BlockLength = (); + type BlockWeights = BlockWeights; + type AccountData = pallet_balances::AccountData; + type Block = frame_system::mocking::MockBlock; +} + +const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); +parameter_types! { + pub const ExistentialDeposit: Balance = 1; + pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights + ::with_sensible_defaults( + Weight::from_parts(2u64 * constants::WEIGHT_REF_TIME_PER_SECOND, u64::MAX), + NORMAL_DISPATCH_RATIO, + ); +} + +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] +impl pallet_balances::Config for Runtime { + type Balance = Balance; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; + type WeightInfo = (); +} + +#[allow(unused)] +#[derive(Clone, Debug)] +pub enum FallbackModes { + Continue, + Emergency, + Onchain, +} + +#[derive(Clone, Debug)] +pub enum AreWeDoneModes { + Proceed, + BackToSigned, +} + +parameter_types! { + // The block at which we emit the start signal. This is used in `roll_next`, which is used all + // across tests. The number comes across as a bit weird, but this is mainly due to backwards + // compatibility with olds tests, when we used to have pull based election prediction. 
+ pub static ElectionStart: BlockNumber = 11; + + + pub static Pages: PageIndex = 3; + pub static UnsignedPhase: BlockNumber = 5; + pub static SignedPhase: BlockNumber = 5; + pub static SignedValidationPhase: BlockNumber = 5; + + pub static FallbackMode: FallbackModes = FallbackModes::Emergency; + pub static MinerTxPriority: u64 = 100; + pub static SolutionImprovementThreshold: Perbill = Perbill::zero(); + pub static OffchainRepeat: BlockNumber = 5; + pub static MinerMaxLength: u32 = 256; + pub static MinerPages: u32 = 1; + pub static MaxVotesPerVoter: u32 = ::LIMIT as u32; + + // by default we stick to 3 pages to host our 12 voters. + pub static VoterSnapshotPerBlock: VoterIndex = 4; + // and 4 targets, whom we fetch all. + pub static TargetSnapshotPerBlock: TargetIndex = 4; + + // we have 12 voters in the default setting, this should be enough to make sure they are not + // trimmed accidentally in any test. + #[derive(Encode, Decode, PartialEq, Eq, Debug, scale_info::TypeInfo, MaxEncodedLen)] + pub static MaxBackersPerWinner: u32 = 12; + pub static MaxBackersPerWinnerFinal: u32 = 12; + // we have 4 targets in total and we desire `Desired` thereof, no single page can represent more + // than the min of these two. 
+ #[derive(Encode, Decode, PartialEq, Eq, Debug, scale_info::TypeInfo, MaxEncodedLen)] + pub static MaxWinnersPerPage: u32 = (staking::Targets::get().len() as u32).min(staking::DesiredTargets::get()); + pub static AreWeDone: AreWeDoneModes = AreWeDoneModes::Proceed; +} + +impl Get> for AreWeDone { + fn get() -> Phase { + match >::get() { + AreWeDoneModes::BackToSigned => RevertToSignedIfNotQueuedOf::::get(), + AreWeDoneModes::Proceed => ProceedRegardlessOf::::get(), + } + } +} + +impl crate::verifier::Config for Runtime { + type SolutionImprovementThreshold = SolutionImprovementThreshold; + type MaxBackersPerWinnerFinal = MaxBackersPerWinnerFinal; + type MaxBackersPerWinner = MaxBackersPerWinner; + type MaxWinnersPerPage = MaxWinnersPerPage; + type SolutionDataProvider = signed::DualSignedPhase; + type WeightInfo = (); +} + +impl crate::unsigned::Config for Runtime { + type MinerPages = MinerPages; + type OffchainRepeat = OffchainRepeat; + type MinerTxPriority = MinerTxPriority; + type OffchainSolver = SequentialPhragmen; + type WeightInfo = (); +} + +impl MinerConfig for Runtime { + type AccountId = AccountId; + type Hash = ::Hash; + type MaxLength = MinerMaxLength; + type Pages = Pages; + type MaxVotesPerVoter = MaxVotesPerVoter; + type Solution = TestNposSolution; + type Solver = SequentialPhragmen; + type TargetSnapshotPerBlock = TargetSnapshotPerBlock; + type VoterSnapshotPerBlock = VoterSnapshotPerBlock; + type MaxBackersPerWinner = MaxBackersPerWinner; + type MaxBackersPerWinnerFinal = MaxBackersPerWinnerFinal; + type MaxWinnersPerPage = MaxWinnersPerPage; +} + +impl crate::Config for Runtime { + type SignedPhase = SignedPhase; + type SignedValidationPhase = SignedValidationPhase; + type UnsignedPhase = UnsignedPhase; + type DataProvider = staking::MockStaking; + type Fallback = MockFallback; + type TargetSnapshotPerBlock = TargetSnapshotPerBlock; + type VoterSnapshotPerBlock = VoterSnapshotPerBlock; + type MinerConfig = Self; + type WeightInfo = (); + type 
Verifier = VerifierPallet; + type AdminOrigin = EnsureRoot; + type Pages = Pages; + type AreWeDone = AreWeDone; +} + +parameter_types! { + pub static OnChainElectionBounds: ElectionBounds = ElectionBoundsBuilder::default().build(); +} + +impl onchain::Config for Runtime { + type DataProvider = staking::MockStaking; + type MaxBackersPerWinner = MaxBackersPerWinner; + type MaxWinnersPerPage = MaxWinnersPerPage; + type Sort = ConstBool; + type Solver = SequentialPhragmen; + type System = Runtime; + type WeightInfo = (); + type Bounds = OnChainElectionBounds; +} + +pub struct MockFallback; +impl ElectionProvider for MockFallback { + type AccountId = AccountId; + type BlockNumber = u64; + type Error = String; + type DataProvider = staking::MockStaking; + type Pages = ConstU32<1>; + type MaxBackersPerWinner = MaxBackersPerWinner; + type MaxWinnersPerPage = MaxWinnersPerPage; + + fn elect(_remaining: PageIndex) -> Result, Self::Error> { + unreachable!() + } + + fn duration() -> Self::BlockNumber { + 0 + } + + fn start() -> Result<(), Self::Error> { + Ok(()) + } + + fn status() -> Result { + Ok(true) + } +} + +impl InstantElectionProvider for MockFallback { + fn instant_elect( + voters: Vec>, + targets: Vec, + desired_targets: u32, + ) -> Result, Self::Error> { + match FallbackMode::get() { + FallbackModes::Continue => + crate::Continue::::instant_elect(voters, targets, desired_targets) + .map_err(|x| x.to_string()), + FallbackModes::Emergency => crate::InitiateEmergencyPhase::::instant_elect( + voters, + targets, + desired_targets, + ) + .map_err(|x| x.to_string()), + FallbackModes::Onchain => onchain::OnChainExecution::::instant_elect( + voters, + targets, + desired_targets, + ) + .map_err(|e| format!("onchain fallback failed: {:?}", e)), + } + } + fn bother() -> bool { + matches!(FallbackMode::get(), FallbackModes::Onchain) + } +} + +impl frame_system::offchain::CreateTransactionBase for Runtime +where + RuntimeCall: From, +{ + type RuntimeCall = RuntimeCall; + type 
Extrinsic = Extrinsic; +} + +impl frame_system::offchain::CreateInherent for Runtime +where + RuntimeCall: From, +{ + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + Extrinsic::new_bare(call) + } +} + +pub struct ExtBuilder {} + +impl ExtBuilder { + pub fn full() -> Self { + Self {} + } + + pub fn verifier() -> Self { + SignedPhase::set(0); + SignedValidationPhase::set(0); + signed::SignedPhaseSwitch::set(signed::SignedSwitch::Mock); + Self {} + } + + pub fn unsigned() -> Self { + SignedPhase::set(0); + SignedValidationPhase::set(0); + signed::SignedPhaseSwitch::set(signed::SignedSwitch::Mock); + Self {} + } + + pub fn signed() -> Self { + UnsignedPhase::set(0); + Self {} + } +} + +impl ExtBuilder { + pub(crate) fn max_backers_per_winner(self, c: u32) -> Self { + MaxBackersPerWinner::set(c); + self + } + pub(crate) fn max_backers_per_winner_final(self, c: u32) -> Self { + MaxBackersPerWinnerFinal::set(c); + self + } + pub(crate) fn miner_tx_priority(self, p: u64) -> Self { + MinerTxPriority::set(p); + self + } + pub(crate) fn solution_improvement_threshold(self, p: Perbill) -> Self { + SolutionImprovementThreshold::set(p); + self + } + pub(crate) fn election_start(self, at: BlockNumber) -> Self { + ElectionStart::set(at); + self + } + pub(crate) fn pages(self, pages: PageIndex) -> Self { + Pages::set(pages); + self + } + pub(crate) fn voter_per_page(self, count: u32) -> Self { + VoterSnapshotPerBlock::set(count); + self + } + pub(crate) fn miner_max_length(self, len: u32) -> Self { + MinerMaxLength::set(len); + self + } + pub(crate) fn desired_targets(self, t: u32) -> Self { + staking::DesiredTargets::set(t); + self + } + pub(crate) fn signed_phase(self, d: BlockNumber, v: BlockNumber) -> Self { + SignedPhase::set(d); + SignedValidationPhase::set(v); + self + } + pub(crate) fn unsigned_phase(self, d: BlockNumber) -> Self { + UnsignedPhase::set(d); + self + } + pub(crate) fn signed_validation_phase(self, d: BlockNumber) -> Self { + 
SignedValidationPhase::set(d); + self + } + pub(crate) fn miner_pages(self, p: u32) -> Self { + MinerPages::set(p); + self + } + #[allow(unused)] + pub(crate) fn add_voter(self, who: AccountId, stake: Balance, targets: Vec) -> Self { + staking::VOTERS.with(|v| v.borrow_mut().push((who, stake, targets.try_into().unwrap()))); + self + } + pub(crate) fn fallback_mode(self, mode: FallbackModes) -> Self { + FallbackMode::set(mode); + self + } + pub(crate) fn are_we_done(self, mode: AreWeDoneModes) -> Self { + AreWeDone::set(mode); + self + } + pub(crate) fn build_unchecked(self) -> sp_io::TestExternalities { + sp_tracing::try_init_simple(); + let mut storage = + frame_system::GenesisConfig::::default().build_storage().unwrap(); + + let _ = pallet_balances::GenesisConfig:: { + balances: vec![ + // bunch of account for submitting stuff only. + (91, 100), + (92, 100), + (93, 100), + (94, 100), + (95, 100), + (96, 100), + (97, 100), + (99, 100), + (999, 100), + (9999, 100), + ], + ..Default::default() + } + .assimilate_storage(&mut storage); + + sp_io::TestExternalities::from(storage) + } + + /// Warning: this does not execute the post-sanity-checks. + pub(crate) fn build_offchainify(self) -> (sp_io::TestExternalities, Arc>) { + let mut ext = self.build_unchecked(); + let (offchain, _offchain_state) = TestOffchainExt::new(); + let (pool, pool_state) = TestTransactionPoolExt::new(); + + ext.register_extension(OffchainDbExt::new(offchain.clone())); + ext.register_extension(OffchainWorkerExt::new(offchain)); + ext.register_extension(TransactionPoolExt::new(pool)); + + (ext, pool_state) + } + + /// Build the externalities, and execute the given s`test` closure with it. 
+ pub(crate) fn build_and_execute(self, test: impl FnOnce() -> ()) { + let mut ext = self.build_unchecked(); + ext.execute_with_sanity_checks(test); + } +} + +pub trait ExecuteWithSanityChecks { + fn execute_with_sanity_checks(&mut self, test: impl FnOnce() -> ()); +} + +impl ExecuteWithSanityChecks for sp_io::TestExternalities { + fn execute_with_sanity_checks(&mut self, test: impl FnOnce() -> ()) { + self.execute_with(test); + self.execute_with(all_pallets_sanity_checks) + } +} + +fn all_pallets_sanity_checks() { + let now = System::block_number(); + let _ = VerifierPallet::do_try_state(now).unwrap(); + let _ = UnsignedPallet::do_try_state(now).unwrap(); + let _ = MultiBlock::do_try_state(now).unwrap(); + let _ = SignedPallet::do_try_state(now).unwrap(); +} + +/// Fully verify a solution. +/// +/// This will progress the blocks until the verifier pallet is done verifying it. +/// +/// The solution must have already been loaded via `load_and_start_verification`. +/// +/// Return the final supports, which is the outcome. If this succeeds, then the valid variant of the +/// `QueuedSolution` form `verifier` is ready to be read. +pub fn roll_to_full_verification() -> Vec> { + // we must be ready to verify. + assert_eq!(VerifierPallet::status(), Status::Ongoing(Pages::get() - 1)); + + while matches!(VerifierPallet::status(), Status::Ongoing(_)) { + roll_to(System::block_number() + 1); + } + + (MultiBlock::lsp()..=MultiBlock::msp()) + .map(|p| VerifierPallet::get_queued_solution_page(p).unwrap_or_default()) + .collect::>() +} + +/// Generate a single page of `TestNposSolution` from the give supports. +/// +/// All of the voters in this support must live in a single page of the snapshot, noted by +/// `snapshot_page`. 
+pub fn solution_from_supports( + supports: sp_npos_elections::Supports, + snapshot_page: PageIndex, +) -> TestNposSolution { + let staked = sp_npos_elections::supports_to_staked_assignment(supports); + let assignments = sp_npos_elections::assignment_staked_to_ratio_normalized(staked).unwrap(); + + let voters = crate::Snapshot::::voters(snapshot_page).unwrap(); + let targets = crate::Snapshot::::targets().unwrap(); + let voter_index = helpers::voter_index_fn_linear::(&voters); + let target_index = helpers::target_index_fn_linear::(&targets); + + TestNposSolution::from_assignment(&assignments, &voter_index, &target_index).unwrap() +} + +/// Generate a raw paged solution from the given vector of supports. +/// +/// Given vector must be aligned with the snapshot, at most need to be 'pagified' which we do +/// internally. +pub fn raw_paged_from_supports( + paged_supports: Vec>, + round: u32, +) -> PagedRawSolution { + let score = { + let flattened = paged_supports.iter().cloned().flatten().collect::>(); + flattened.evaluate() + }; + + let solution_pages = paged_supports + .pagify(Pages::get()) + .map(|(page_index, page_support)| solution_from_supports(page_support.to_vec(), page_index)) + .collect::>(); + + let solution_pages = solution_pages.try_into().unwrap(); + PagedRawSolution { solution_pages, score, round } +} + +/// ensure that the snapshot fully exists. +/// +/// NOTE: this should not be used that often, because we check snapshot in sanity checks, which are +/// called ALL THE TIME. +pub fn assert_full_snapshot() { + assert_ok!(Snapshot::::ensure_snapshot(true, Pages::get())); +} + +/// ensure that the no snapshot exists. +/// +/// NOTE: this should not be used that often, because we check snapshot in sanity checks, which are +/// called ALL THE TIME. +pub fn assert_none_snapshot() { + assert_ok!(Snapshot::::ensure_snapshot(false, Pages::get())); +} + +/// Simple wrapper for mining a new solution. 
Just more handy in case the interface of mine solution +/// changes. +/// +/// For testing, we never want to do reduce. +pub fn mine_full_solution() -> Result, OffchainMinerError> { + OffchainWorkerMiner::::mine_solution(Pages::get(), false) +} + +/// Same as [`mine_full_solution`] but with custom pages. +pub fn mine_solution( + pages: PageIndex, +) -> Result, OffchainMinerError> { + OffchainWorkerMiner::::mine_solution(pages, false) +} + +/// Assert that `count` voters exist across `pages` number of pages. +pub fn ensure_voters(pages: PageIndex, count: usize) { + assert_eq!(crate::Snapshot::::voter_pages(), pages); + assert_eq!(crate::Snapshot::::voters_iter_flattened().count(), count); +} + +/// Assert that `count` targets exist across `pages` number of pages. +pub fn ensure_targets(pages: PageIndex, count: usize) { + assert_eq!(crate::Snapshot::::target_pages(), pages); + assert_eq!(crate::Snapshot::::targets().unwrap().len(), count); +} + +/// get the events of the multi-block pallet. +pub fn multi_block_events() -> Vec> { + System::events() + .into_iter() + .map(|r| r.event) + .filter_map(|e| if let RuntimeEvent::MultiBlock(inner) = e { Some(inner) } else { None }) + .collect::>() +} + +parameter_types! { + static MultiBlockEvents: u32 = 0; +} + +pub fn multi_block_events_since_last_call() -> Vec> { + let events = multi_block_events(); + let already_seen = MultiBlockEvents::get(); + MultiBlockEvents::set(events.len() as u32); + events.into_iter().skip(already_seen as usize).collect() +} + +/// get the events of the verifier pallet. +pub fn verifier_events() -> Vec> { + System::events() + .into_iter() + .map(|r| r.event) + .filter_map( + |e| if let RuntimeEvent::VerifierPallet(inner) = e { Some(inner) } else { None }, + ) + .collect::>() +} + +/// proceed block number to `n`. 
+pub fn roll_to(n: BlockNumber) { + crate::Pallet::::roll_to( + n, + matches!(SignedPhaseSwitch::get(), SignedSwitch::Real), + true, + ); +} + +/// proceed block number to whenever the snapshot is fully created (`Phase::Snapshot(0)`). +pub fn roll_to_snapshot_created() { + while !matches!(MultiBlock::current_phase(), Phase::Snapshot(0)) { + roll_next() + } + roll_next(); + assert_full_snapshot(); +} + +/// proceed block number to whenever the unsigned phase is open (`Phase::Unsigned(_)`). +pub fn roll_to_unsigned_open() { + while !matches!(MultiBlock::current_phase(), Phase::Unsigned(_)) { + roll_next() + } +} + +/// proceed block number to whenever the unsigned phase is about to close (`Phase::Unsigned(_)`). +pub fn roll_to_last_unsigned() { + while !matches!(MultiBlock::current_phase(), Phase::Unsigned(0)) { + roll_next() + } +} + +/// proceed block number to whenever the signed phase is open (`Phase::Signed(_)`). +pub fn roll_to_signed_open() { + while !matches!(MultiBlock::current_phase(), Phase::Signed(_)) { + roll_next(); + } +} + +/// proceed block number to whenever the signed validation phase is open +/// (`Phase::SignedValidation(_)`). +pub fn roll_to_signed_validation_open() { + while !matches!(MultiBlock::current_phase(), Phase::SignedValidation(_)) { + roll_next() + } +} + +/// Proceed one block. +pub fn roll_next() { + let now = System::block_number(); + roll_to(now + 1); +} + +/// Proceed one block, and execute offchain workers as well. +pub fn roll_next_with_ocw(maybe_pool: Option>>) { + roll_to_with_ocw(System::block_number() + 1, maybe_pool) +} + +pub fn roll_to_unsigned_open_with_ocw(maybe_pool: Option>>) { + while !matches!(MultiBlock::current_phase(), Phase::Unsigned(_)) { + roll_next_with_ocw(maybe_pool.clone()); + } +} + +/// proceed block number to `n`, while running all offchain workers as well. 
+pub fn roll_to_with_ocw(n: BlockNumber, maybe_pool: Option>>) { + use sp_runtime::traits::Dispatchable; + let now = System::block_number(); + for i in now + 1..=n { + // check the offchain transaction pool, and if anything's there, submit it. + if let Some(ref pool) = maybe_pool { + pool.read() + .transactions + .clone() + .into_iter() + .map(|uxt| ::decode(&mut &*uxt).unwrap()) + .for_each(|xt| { + xt.function.dispatch(frame_system::RawOrigin::None.into()).unwrap(); + }); + pool.try_write().unwrap().transactions.clear(); + } + + System::set_block_number(i); + + MultiBlock::on_initialize(i); + VerifierPallet::on_initialize(i); + UnsignedPallet::on_initialize(i); + if matches!(SignedPhaseSwitch::get(), SignedSwitch::Real) { + SignedPallet::on_initialize(i); + } + + MultiBlock::offchain_worker(i); + VerifierPallet::offchain_worker(i); + UnsignedPallet::offchain_worker(i); + if matches!(SignedPhaseSwitch::get(), SignedSwitch::Real) { + SignedPallet::offchain_worker(i); + } + + // invariants must hold at the end of each block. + all_pallets_sanity_checks() + } +} + +/// An invalid solution with any score. +pub fn fake_solution(score: ElectionScore) -> PagedRawSolution { + PagedRawSolution { + score, + solution_pages: bounded_vec![Default::default()], + ..Default::default() + } +} + +/// A real solution that's valid, but has a really bad score. +/// +/// This is different from `solution_from_supports` in that it does not require the snapshot to +/// exist. +pub fn raw_paged_solution_low_score() -> PagedRawSolution { + PagedRawSolution { + solution_pages: vec![TestNposSolution { + // 2 targets, both voting for themselves + votes1: vec![(0, 0), (1, 2)], + ..Default::default() + }] + .try_into() + .unwrap(), + round: 0, + score: ElectionScore { minimal_stake: 10, sum_stake: 20, sum_stake_squared: 200 }, + } +} + +/// Get the free and held balance of `who`. 
+pub fn balances(who: AccountId) -> (Balance, Balance) { + ( + Balances::free_balance(who), + Balances::balance_on_hold(&HoldReason::SignedSubmission.into(), &who), + ) +} + +/// Election bounds based on just the given count. +pub fn bound_by_count(count: Option) -> DataProviderBounds { + DataProviderBounds { count: count.map(|x| x.into()), size: None } +} + +pub fn emergency_solution() -> (BoundedSupportsOf, ElectionScore) { + let supports = onchain::OnChainExecution::::elect(0).unwrap(); + let score = supports.evaluate(); + (supports, score) +} diff --git a/substrate/frame/election-provider-multi-block/src/mock/signed.rs b/substrate/frame/election-provider-multi-block/src/mock/signed.rs new file mode 100644 index 0000000000000..a11e737612c08 --- /dev/null +++ b/substrate/frame/election-provider-multi-block/src/mock/signed.rs @@ -0,0 +1,276 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use super::{Balance, Balances, Pages, Runtime, RuntimeEvent, SignedPallet, System}; +use crate::{ + mock::*, + signed::{self as signed_pallet, Event as SignedEvent, Submissions}, + unsigned::miner::MinerConfig, + verifier::{self, AsynchronousVerifier, SolutionDataProvider, VerificationResult, Verifier}, + Event, PadSolutionPages, PagedRawSolution, Pagify, Phase, SolutionOf, +}; +use frame_election_provider_support::PageIndex; +use frame_support::{ + assert_ok, dispatch::PostDispatchInfo, parameter_types, traits::EstimateCallFee, BoundedVec, +}; +use sp_npos_elections::ElectionScore; +use sp_runtime::{traits::Zero, Perbill}; + +parameter_types! { + pub static MockSignedNextSolution: Option, Pages>> = None; + pub static MockSignedNextScore: Option = Default::default(); + pub static MockSignedResults: Vec = Default::default(); +} + +/// A simple implementation of the signed phase that can be controlled by some static variables +/// directly. +/// +/// Useful for when you don't care too much about the signed phase. +pub struct MockSignedPhase; +impl SolutionDataProvider for MockSignedPhase { + type Solution = ::Solution; + fn get_page(page: PageIndex) -> Option { + MockSignedNextSolution::get().map(|i| i.get(page as usize).cloned().unwrap_or_default()) + } + + fn get_score() -> Option { + MockSignedNextScore::get() + } + + fn report_result(result: verifier::VerificationResult) { + MOCK_SIGNED_RESULTS.with(|r| r.borrow_mut().push(result)); + } +} + +pub struct FixedCallFee; +impl EstimateCallFee, Balance> for FixedCallFee { + fn estimate_call_fee(_: &signed_pallet::Call, _: PostDispatchInfo) -> Balance { + 1 + } +} + +parameter_types!
{ + pub static SignedDepositBase: Balance = 5; + pub static SignedDepositPerPage: Balance = 1; + pub static SignedMaxSubmissions: u32 = 3; + pub static SignedRewardBase: Balance = 3; + pub static SignedPhaseSwitch: SignedSwitch = SignedSwitch::Real; + pub static BailoutGraceRatio: Perbill = Perbill::from_percent(20); + pub static EjectGraceRatio: Perbill = Perbill::from_percent(20); +} + +impl crate::signed::Config for Runtime { + type RuntimeHoldReason = RuntimeHoldReason; + type Currency = Balances; + type DepositBase = SignedDepositBase; + type DepositPerPage = SignedDepositPerPage; + type EstimateCallFee = FixedCallFee; + type MaxSubmissions = SignedMaxSubmissions; + type RewardBase = SignedRewardBase; + type BailoutGraceRatio = BailoutGraceRatio; + type EjectGraceRatio = EjectGraceRatio; + type WeightInfo = (); +} + +/// Control which signed phase is being used. +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub enum SignedSwitch { + Mock, + Real, +} + +pub struct DualSignedPhase; +impl SolutionDataProvider for DualSignedPhase { + type Solution = ::Solution; + fn get_page(page: PageIndex) -> Option { + match SignedPhaseSwitch::get() { + SignedSwitch::Mock => MockSignedNextSolution::get() + .map(|i| i.get(page as usize).cloned().unwrap_or_default()), + SignedSwitch::Real => SignedPallet::get_page(page), + } + } + + fn get_score() -> Option { + match SignedPhaseSwitch::get() { + SignedSwitch::Mock => MockSignedNextScore::get(), + SignedSwitch::Real => SignedPallet::get_score(), + } + } + + fn report_result(result: verifier::VerificationResult) { + match SignedPhaseSwitch::get() { + SignedSwitch::Mock => MOCK_SIGNED_RESULTS.with(|r| r.borrow_mut().push(result)), + SignedSwitch::Real => SignedPallet::report_result(result), + } + } +} + +parameter_types! 
+ { + static SignedEventsIndex: u32 = 0; +} + +pub fn singed_events_since_last_call() -> Vec> { + let events = signed_events(); + let already_seen = SignedEventsIndex::get(); + SignedEventsIndex::set(events.len() as u32); + events.into_iter().skip(already_seen as usize).collect() +} + +/// get the events of the signed pallet. +pub fn signed_events() -> Vec> { + System::events() + .into_iter() + .map(|r| r.event) + .filter_map(|e| if let RuntimeEvent::SignedPallet(inner) = e { Some(inner) } else { None }) + .collect::>() +} + +/// Load a signed solution into its pallet. +pub fn load_signed_for_verification(who: AccountId, paged: PagedRawSolution) { + let initial_balance = Balances::free_balance(&who); + assert_eq!(balances(who), (initial_balance, 0)); + + assert_ok!(SignedPallet::register(RuntimeOrigin::signed(who), paged.score)); + + assert_eq!( + balances(who), + (initial_balance - SignedDepositBase::get(), SignedDepositBase::get()) + ); + + for (page_index, solution_page) in paged.solution_pages.pagify(Pages::get()) { + assert_ok!(SignedPallet::submit_page( + RuntimeOrigin::signed(who), + page_index, + Some(Box::new(solution_page.clone())) + )); + } + + let mut events = signed_events(); + for _ in 0..Pages::get() { + let event = events.pop().unwrap(); + assert!(matches!(event, SignedEvent::Stored(_, x, _) if x == who)) + } + assert!(matches!(events.pop().unwrap(), SignedEvent::Registered(_, x, _) if x == who)); + + let full_deposit = + SignedDepositBase::get() + (Pages::get() as Balance) * SignedDepositPerPage::get(); + assert_eq!(balances(who), (initial_balance - full_deposit, full_deposit)); +} + +/// Same as [`load_signed_for_verification`], but also goes forward to the beginning of the signed +/// verification phase. +pub fn load_signed_for_verification_and_start( + who: AccountId, + paged: PagedRawSolution, + _round: u32, +) { + load_signed_for_verification(who, paged); + + // now the solution should start being verified.
+ roll_to_signed_validation_open(); + assert_eq!( + multi_block_events(), + vec![ + Event::PhaseTransitioned { from: Phase::Off, to: Phase::Snapshot(3) }, + Event::PhaseTransitioned { + from: Phase::Snapshot(0), + to: Phase::Signed(SignedPhase::get() - 1) + }, + Event::PhaseTransitioned { + from: Phase::Signed(0), + to: Phase::SignedValidation(SignedValidationPhase::get() - 1) + } + ] + ); + assert_eq!(verifier_events(), vec![]); +} + +/// Same as [`load_signed_for_verification_and_start`], but also goes forward enough blocks for the +/// solution to be verified, assuming it is all correct. +/// +/// In other words, it goes [`Pages`] blocks forward. +pub fn load_signed_for_verification_and_start_and_roll_to_verified( + who: AccountId, + paged: PagedRawSolution, + _round: u32, +) { + load_signed_for_verification(who, paged.clone()); + + // now the solution should start being verified. + roll_to_signed_validation_open(); + assert_eq!( + multi_block_events(), + vec![ + Event::PhaseTransitioned { from: Phase::Off, to: Phase::Snapshot(Pages::get()) }, + Event::PhaseTransitioned { + from: Phase::Snapshot(0), + to: Phase::Signed(SignedPhase::get() - 1) + }, + Event::PhaseTransitioned { + from: Phase::Signed(0), + to: Phase::SignedValidation(SignedValidationPhase::get() - 1) + } + ] + ); + assert_eq!(verifier_events(), vec![]); + + // there is no queued solution prior to the last page of the solution getting verified + assert_eq!(::Verifier::queued_score(), None); + + // roll to the block it is finalized. + for _ in 0..Pages::get() { + roll_next(); + } + + assert_eq!( + verifier_events(), + vec![ + // NOTE: these are hardcoded for 3 page. + verifier::Event::Verified(2, 2), + verifier::Event::Verified(1, 2), + verifier::Event::Verified(0, 2), + verifier::Event::Queued(paged.score, None), + ] + ); + + // there is now a queued solution. + assert_eq!(::Verifier::queued_score(), Some(paged.score)); +} + +/// Load a full raw paged solution for verification. 
+/// +/// More or less the equivalent of `load_signed_for_verification_and_start`, but when +/// `SignedSwitch::Mock` is set. +pub fn load_mock_signed_and_start(raw_paged: PagedRawSolution) { + assert_eq!( + SignedPhaseSwitch::get(), + SignedSwitch::Mock, + "you should not use this if mock phase is not being mocked" + ); + MockSignedNextSolution::set(Some(raw_paged.solution_pages.pad_solution_pages(Pages::get()))); + MockSignedNextScore::set(Some(raw_paged.score)); + + // Let's gooooo! + assert_ok!(::start()); +} + +/// Ensure that no submission data exists in `round` for `who`. +pub fn assert_no_data_for(round: u32, who: AccountId) { + assert!(!Submissions::::leaderboard(round).into_iter().any(|(x, _)| x == who)); + assert!(Submissions::::metadata_of(round, who).is_none()); + assert!(Submissions::::pages_of(round, who).count().is_zero()); +} diff --git a/substrate/frame/election-provider-multi-block/src/mock/staking.rs b/substrate/frame/election-provider-multi-block/src/mock/staking.rs new file mode 100644 index 0000000000000..072999ae73821 --- /dev/null +++ b/substrate/frame/election-provider-multi-block/src/mock/staking.rs @@ -0,0 +1,244 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use super::{AccountId, MaxVotesPerVoter, Runtime}; +use crate::VoterOf; +use frame_election_provider_support::{ + data_provider, DataProviderBounds, ElectionDataProvider, PageIndex, VoteWeight, +}; +use frame_support::pallet_prelude::*; +use sp_core::bounded_vec; +use sp_std::prelude::*; + +pub type T = Runtime; + +frame_support::parameter_types! { + pub static Targets: Vec = vec![10, 20, 30, 40]; + pub static Voters: Vec> = vec![ + // page 2: + (1, 10, bounded_vec![10, 20]), + (2, 10, bounded_vec![30, 40]), + (3, 10, bounded_vec![40]), + (4, 10, bounded_vec![10, 20, 40]), + // page 1: + (5, 10, bounded_vec![10, 30, 40]), + (6, 10, bounded_vec![20, 30, 40]), + (7, 10, bounded_vec![20, 30]), + (8, 10, bounded_vec![10]), + // page 0: (self-votes) + (10, 10, bounded_vec![10]), + (20, 20, bounded_vec![20]), + (30, 30, bounded_vec![30]), + (40, 40, bounded_vec![40]), + ]; + pub static DesiredTargets: u32 = 2; + pub static EpochLength: u64 = 30; + + pub static LastIteratedVoterIndex: Option = None; +} + +pub struct MockStaking; +impl ElectionDataProvider for MockStaking { + type AccountId = AccountId; + type BlockNumber = u64; + type MaxVotesPerVoter = MaxVotesPerVoter; + + fn electable_targets( + bounds: DataProviderBounds, + remaining: PageIndex, + ) -> data_provider::Result> { + let targets = Targets::get(); + + if remaining != 0 { + crate::log!( + warn, + "requesting targets for non-zero page, we will return the same page in any case" + ); + } + if bounds.slice_exhausted(&targets) { + return Err("Targets too big") + } + + Ok(targets) + } + + fn electing_voters( + bounds: DataProviderBounds, + remaining: PageIndex, + ) -> data_provider::Result< + Vec<(AccountId, VoteWeight, BoundedVec)>, + > { + let mut voters = Voters::get(); + + // jump to the first non-iterated, if this is a follow up. + if let Some(index) = LastIteratedVoterIndex::get() { + voters = voters.iter().skip(index).cloned().collect::>(); + } + + // take as many as you can. 
+ if let Some(max_len) = bounds.count.map(|c| c.0 as usize) { + voters.truncate(max_len) + } + + if voters.is_empty() { + return Ok(vec![]) + } + + if remaining > 0 { + let last = voters.last().cloned().unwrap(); + LastIteratedVoterIndex::set(Some( + Voters::get().iter().position(|v| v == &last).map(|i| i + 1).unwrap(), + )); + } else { + LastIteratedVoterIndex::set(None) + } + + Ok(voters) + } + + fn desired_targets() -> data_provider::Result { + Ok(DesiredTargets::get()) + } + + fn next_election_prediction(_: u64) -> u64 { + unreachable!("not used in this pallet") + } + + #[cfg(feature = "runtime-benchmarks")] + fn put_snapshot( + voters: Vec<(AccountId, VoteWeight, BoundedVec)>, + targets: Vec, + _target_stake: Option, + ) { + Targets::set(targets); + Voters::set(voters); + } + + #[cfg(feature = "runtime-benchmarks")] + fn clear() { + Targets::set(vec![]); + Voters::set(vec![]); + } + + #[cfg(feature = "runtime-benchmarks")] + fn fetch_page(page: PageIndex) { + use frame_election_provider_support::ElectionProvider; + super::MultiBlock::elect(page).unwrap(); + } + + #[cfg(feature = "runtime-benchmarks")] + fn add_voter( + voter: AccountId, + weight: VoteWeight, + targets: BoundedVec, + ) { + let mut current = Voters::get(); + current.push((voter, weight, targets)); + Voters::set(current); + } + + #[cfg(feature = "runtime-benchmarks")] + fn add_target(target: AccountId) { + use super::ExistentialDeposit; + + let mut current = Targets::get(); + current.push(target); + Targets::set(current); + + // to be on-par with staking, we add a self vote as well. the stake is really not that + // important. 
+ let mut current = Voters::get(); + current.push((target, ExistentialDeposit::get() as u64, vec![target].try_into().unwrap())); + Voters::set(current); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::mock::{bound_by_count, ExtBuilder}; + + #[test] + fn targets() { + ExtBuilder::full().build_and_execute(|| { + assert_eq!(Targets::get().len(), 4); + + // any non-zero page returns page zero. + assert_eq!(MockStaking::electable_targets(bound_by_count(None), 2).unwrap().len(), 4); + assert_eq!(MockStaking::electable_targets(bound_by_count(None), 1).unwrap().len(), 4); + + // 0 is also fine. + assert_eq!(MockStaking::electable_targets(bound_by_count(None), 0).unwrap().len(), 4); + + // fetch less targets is error, because targets cannot be sorted (both by MockStaking, + // and the real staking). + assert!(MockStaking::electable_targets(bound_by_count(Some(2)), 0).is_err()); + + // more targets is fine. + assert!(MockStaking::electable_targets(bound_by_count(Some(4)), 0).is_ok()); + assert!(MockStaking::electable_targets(bound_by_count(Some(5)), 0).is_ok()); + }); + } + + #[test] + fn multi_page_votes() { + ExtBuilder::full().build_and_execute(|| { + assert_eq!(MockStaking::electing_voters(bound_by_count(None), 0).unwrap().len(), 12); + assert!(LastIteratedVoterIndex::get().is_none()); + + assert_eq!( + MockStaking::electing_voters(bound_by_count(Some(4)), 0) + .unwrap() + .into_iter() + .map(|(x, _, _)| x) + .collect::>(), + vec![1, 2, 3, 4], + ); + assert!(LastIteratedVoterIndex::get().is_none()); + + assert_eq!( + MockStaking::electing_voters(bound_by_count(Some(4)), 2) + .unwrap() + .into_iter() + .map(|(x, _, _)| x) + .collect::>(), + vec![1, 2, 3, 4], + ); + assert_eq!(LastIteratedVoterIndex::get().unwrap(), 4); + + assert_eq!( + MockStaking::electing_voters(bound_by_count(Some(4)), 1) + .unwrap() + .into_iter() + .map(|(x, _, _)| x) + .collect::>(), + vec![5, 6, 7, 8], + ); + assert_eq!(LastIteratedVoterIndex::get().unwrap(), 8); + + assert_eq!( + 
MockStaking::electing_voters(bound_by_count(Some(4)), 0) + .unwrap() + .into_iter() + .map(|(x, _, _)| x) + .collect::>(), + vec![10, 20, 30, 40], + ); + assert!(LastIteratedVoterIndex::get().is_none()); + }) + } +} diff --git a/substrate/frame/election-provider-multi-block/src/signed/benchmarking.rs b/substrate/frame/election-provider-multi-block/src/signed/benchmarking.rs new file mode 100644 index 0000000000000..78d75520ef581 --- /dev/null +++ b/substrate/frame/election-provider-multi-block/src/signed/benchmarking.rs @@ -0,0 +1,206 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::{ + signed::{Config, Pallet, Submissions}, + types::PagedRawSolution, + unsigned::miner::OffchainWorkerMiner, + CurrentPhase, Phase, Round, +}; +use frame_benchmarking::v2::*; +use frame_election_provider_support::ElectionProvider; +use frame_support::pallet_prelude::*; +use frame_system::RawOrigin; +use sp_npos_elections::ElectionScore; +use sp_runtime::traits::One; +use sp_std::boxed::Box; + +#[benchmarks(where T: crate::Config + crate::verifier::Config + crate::unsigned::Config)] +mod benchmarks { + use super::*; + + #[benchmark(pov_mode = Measured)] + fn register_not_full() -> Result<(), BenchmarkError> { + CurrentPhase::::put(Phase::Signed(T::SignedPhase::get() - One::one())); + let round = Round::::get(); + let alice = crate::Pallet::::funded_account("alice", 0); + let score = ElectionScore::default(); + + assert_eq!(Submissions::::sorted_submitters(round).len(), 0); + #[block] + { + Pallet::::register(RawOrigin::Signed(alice).into(), score)?; + } + + assert_eq!(Submissions::::sorted_submitters(round).len(), 1); + Ok(()) + } + + #[benchmark(pov_mode = Measured)] + fn register_eject() -> Result<(), BenchmarkError> { + CurrentPhase::::put(Phase::Signed(T::SignedPhase::get() - One::one())); + let round = Round::::get(); + + for i in 0..T::MaxSubmissions::get() { + let submitter = crate::Pallet::::funded_account("submitter", i); + let score = ElectionScore { minimal_stake: i.into(), ..Default::default() }; + Pallet::::register(RawOrigin::Signed(submitter.clone()).into(), score)?; + + // The first one, which will be ejected, has also submitted all pages + if i == 0 { + for p in 0..T::Pages::get() { + let page = Some(Default::default()); + Pallet::::submit_page(RawOrigin::Signed(submitter.clone()).into(), p, page)?; + } + } + } + + let who = crate::Pallet::::funded_account("who", 0); + let score = + ElectionScore { minimal_stake: T::MaxSubmissions::get().into(), ..Default::default() }; + + assert_eq!( + Submissions::::sorted_submitters(round).len(), + 
T::MaxSubmissions::get() as usize + ); + + #[block] + { + Pallet::::register(RawOrigin::Signed(who).into(), score)?; + } + + assert_eq!( + Submissions::::sorted_submitters(round).len(), + T::MaxSubmissions::get() as usize + ); + Ok(()) + } + + #[benchmark(pov_mode = Measured)] + fn submit_page() -> Result<(), BenchmarkError> { + #[cfg(test)] + crate::mock::ElectionStart::set(sp_runtime::traits::Bounded::max_value()); + crate::Pallet::::start().unwrap(); + + crate::Pallet::::roll_until_matches(|| { + matches!(CurrentPhase::::get(), Phase::Signed(_)) + }); + + // mine a full solution + let PagedRawSolution { score, solution_pages, .. } = + OffchainWorkerMiner::::mine_solution(T::Pages::get(), false).unwrap(); + let page = Some(Box::new(solution_pages[0].clone())); + + // register alice + let alice = crate::Pallet::::funded_account("alice", 0); + Pallet::::register(RawOrigin::Signed(alice.clone()).into(), score)?; + + #[block] + { + Pallet::::submit_page(RawOrigin::Signed(alice).into(), 0, page)?; + } + + Ok(()) + } + + #[benchmark(pov_mode = Measured)] + fn unset_page() -> Result<(), BenchmarkError> { + #[cfg(test)] + crate::mock::ElectionStart::set(sp_runtime::traits::Bounded::max_value()); + crate::Pallet::::start().unwrap(); + + crate::Pallet::::roll_until_matches(|| { + matches!(CurrentPhase::::get(), Phase::Signed(_)) + }); + + // mine a full solution + let PagedRawSolution { score, solution_pages, .. 
} = + OffchainWorkerMiner::::mine_solution(T::Pages::get(), false).unwrap(); + let page = Some(Box::new(solution_pages[0].clone())); + + // register alice + let alice = crate::Pallet::::funded_account("alice", 0); + Pallet::::register(RawOrigin::Signed(alice.clone()).into(), score)?; + + // submit page + Pallet::::submit_page(RawOrigin::Signed(alice.clone()).into(), 0, page)?; + + #[block] + { + Pallet::::submit_page(RawOrigin::Signed(alice).into(), 0, None)?; + } + + Ok(()) + } + + #[benchmark(pov_mode = Measured)] + fn bail() -> Result<(), BenchmarkError> { + CurrentPhase::::put(Phase::Signed(T::SignedPhase::get() - One::one())); + let alice = crate::Pallet::::funded_account("alice", 0); + + // register alice + let score = ElectionScore::default(); + Pallet::::register(RawOrigin::Signed(alice.clone()).into(), score)?; + + // submit all pages + for p in 0..T::Pages::get() { + let page = Some(Default::default()); + Pallet::::submit_page(RawOrigin::Signed(alice.clone()).into(), p, page)?; + } + + #[block] + { + Pallet::::bail(RawOrigin::Signed(alice).into())?; + } + + Ok(()) + } + + #[benchmark(pov_mode = Measured)] + fn clear_old_round_data(p: Linear<1, { T::Pages::get() }>) -> Result<(), BenchmarkError> { + // set signed phase and alice ready to submit + CurrentPhase::::put(Phase::Signed(T::SignedPhase::get() - One::one())); + let alice = crate::Pallet::::funded_account("alice", 0); + + // register alice + let score = ElectionScore::default(); + Pallet::::register(RawOrigin::Signed(alice.clone()).into(), score)?; + + // submit a solution with p pages. + for pp in 0..p { + let page = Some(Default::default()); + Pallet::::submit_page(RawOrigin::Signed(alice.clone()).into(), pp, page)?; + } + + // force rotate to the next round. 
+ let prev_round = Round::::get(); + crate::Pallet::::rotate_round(); + + #[block] + { + Pallet::::clear_old_round_data(RawOrigin::Signed(alice).into(), prev_round, p)?; + } + + Ok(()) + } + + impl_benchmark_test_suite!( + Pallet, + crate::mock::ExtBuilder::signed().build_unchecked(), + crate::mock::Runtime + ); +} diff --git a/substrate/frame/election-provider-multi-block/src/signed/mod.rs b/substrate/frame/election-provider-multi-block/src/signed/mod.rs new file mode 100644 index 0000000000000..58d73bccf76de --- /dev/null +++ b/substrate/frame/election-provider-multi-block/src/signed/mod.rs @@ -0,0 +1,962 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The signed phase of the multi-block election system. +//! +//! Signed submissions work on the basis of keeping a queue of submissions from unknown signed +//! accounts, and sorting them based on the best claimed score to the worst. +//! +//! Each submission must put a deposit down. This is parameterize-able by the runtime, and might be +//! a constant, linear or exponential value. See [`signed::Config::DepositPerPage`] and +//! [`signed::Config::DepositBase`]. +//! +//! During the queuing time, if the queue is full, and a better solution comes in, the weakest +//! deposit is said to be **Ejected**. Ejected solutions get [`signed::Config::EjectGraceRatio`] of +//! 
+ their deposit back. This is because we have to delete any submitted pages from them on the spot. +//! They don't get any refund of whatever tx-fee they have paid. +//! +//! Once the time to evaluate the signed phase comes (`Phase::SignedValidation`), the solutions are +//! checked from best-to-worst claim, and they end up in either of the 3 buckets: +//! +//! 1. **Rewarded**: If they are the first correct solution (and consequently the best one, since we +//! start evaluating from the best claim), they are rewarded. Rewarded solutions always get both +//! their deposit and transaction fee back. +//! 2. **Slashed**: Any invalid solution that wasted valuable blockchain time gets slashed for their +//! deposit. +//! 3. **Discarded**: Any solution after the first correct solution is eligible to be peacefully +//! discarded. But, to delete their data, they have to call +//! [`signed::Call::clear_old_round_data`]. Once done, they get their full deposit back. Their +//! tx-fee is not refunded. +//! +//! ## Future Plans: +//! +//! **Lazy deletion**: +//! Overall, this pallet can avoid the need to delete any storage item, by: +//! 1. outsource the storage of solution data to some other pallet. +//! 2. keep it here, but make everything be also a map of the round number, so that we can keep old +//! storage, and it is ONLY EVER removed after that round number is over. This can happen +//! for more or less free by the submitter itself, and by anyone else as well, in which case they +//! get a share of the sum deposit. The share increases as time goes on. +//! **Metadata update**: imagine you mis-computed your score. +//! **whitelisted accounts**: who will not pay deposits are needed. They can still be ejected, but +//! for free. +//! **Permissionless `clear_old_round_data`**: Anyone can clean anyone else's data, and get a part +//! of their deposit.
+ +use crate::{ + types::SolutionOf, + verifier::{AsynchronousVerifier, SolutionDataProvider, Status, VerificationResult}, +}; +use codec::{Decode, Encode, MaxEncodedLen}; +use frame_election_provider_support::PageIndex; +use frame_support::{ + dispatch::DispatchResultWithPostInfo, + pallet_prelude::{StorageDoubleMap, ValueQuery, *}, + traits::{ + tokens::{ + fungible::{Inspect, Mutate, MutateHold}, + Fortitude, Precision, + }, + Defensive, DefensiveSaturating, EstimateCallFee, + }, + BoundedVec, Twox64Concat, +}; +use frame_system::{ensure_signed, pallet_prelude::*}; +use scale_info::TypeInfo; +use sp_io::MultiRemovalResults; +use sp_npos_elections::ElectionScore; +use sp_runtime::{traits::Saturating, Perbill}; +use sp_std::prelude::*; + +/// Explore all weights +pub use crate::weights::measured::pallet_election_provider_multi_block_signed::*; +/// Exports of this pallet +pub use pallet::*; + +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; + +pub(crate) type SignedWeightsOf = ::WeightInfo; + +#[cfg(test)] +mod tests; + +type BalanceOf = + <::Currency as Inspect<::AccountId>>::Balance; + +/// All of the (meta) data around a signed submission +#[derive(Encode, Decode, MaxEncodedLen, TypeInfo, Default, DebugNoBound)] +#[cfg_attr(test, derive(frame_support::PartialEqNoBound, frame_support::EqNoBound))] +#[codec(mel_bound(T: Config))] +#[scale_info(skip_type_params(T))] +pub struct SubmissionMetadata { + /// The amount of deposit that has been held in reserve. + deposit: BalanceOf, + /// The amount of transaction fee that this submission has cost for its submitter so far. + fee: BalanceOf, + /// The amount of rewards that we expect to give to this submission, if deemed worthy. + reward: BalanceOf, + /// The score that this submission is claiming to achieve. + claimed_score: ElectionScore, + /// A bounded-bool-vec of pages that have been submitted so far. 
+ pages: BoundedVec, +} + +impl SolutionDataProvider for Pallet { + type Solution = SolutionOf; + + fn get_page(page: PageIndex) -> Option { + // note: a non-existing page will still be treated as merely an empty page. This could be + // re-considered. + let current_round = Self::current_round(); + Submissions::::leader(current_round).map(|(who, _score)| { + sublog!(info, "signed", "returning page {} of {:?}'s submission as leader.", page, who); + Submissions::::get_page_of(current_round, &who, page).unwrap_or_default() + }) + } + + fn get_score() -> Option { + Submissions::::leader(Self::current_round()).map(|(_who, score)| score) + } + + fn report_result(result: crate::verifier::VerificationResult) { + // assumption of the trait. + debug_assert!(matches!(::status(), Status::Nothing)); + let current_round = Self::current_round(); + + match result { + VerificationResult::Queued => { + // defensive: if there is a result to be reported, then we must have had some + // leader. + if let Some((winner, metadata)) = + Submissions::::take_leader_with_data(Self::current_round()).defensive() + { + // first, let's give them their reward. + let reward = metadata.reward.saturating_add(metadata.fee); + let _r = T::Currency::mint_into(&winner, reward); + debug_assert!(_r.is_ok()); + Self::deposit_event(Event::::Rewarded( + current_round, + winner.clone(), + reward, + )); + + // then, unreserve their deposit + let _res = T::Currency::release( + &HoldReason::SignedSubmission.into(), + &winner, + metadata.deposit, + Precision::BestEffort, + ); + debug_assert!(_res.is_ok()); + } + }, + VerificationResult::Rejected => { + // defensive: if there is a result to be reported, then we must have had some + // leader. + if let Some((loser, metadata)) = + Submissions::::take_leader_with_data(Self::current_round()).defensive() + { + // first, let's slash their deposit. 
+ let slash = metadata.deposit; + let _res = T::Currency::burn_held( + &HoldReason::SignedSubmission.into(), + &loser, + slash, + Precision::BestEffort, + Fortitude::Force, + ); + debug_assert_eq!(_res, Ok(slash)); + Self::deposit_event(Event::::Slashed(current_round, loser.clone(), slash)); + + // inform the verifier that they can now try again, if we're still in the signed + // validation phase. + if crate::Pallet::::current_phase().is_signed_validation() && + Submissions::::has_leader(current_round) + { + // defensive: verifier just reported back a result, it must be in clear + // state. + let _ = ::start().defensive(); + } + } + }, + VerificationResult::DataUnavailable => { + unreachable!("TODO") + }, + } + } +} + +/// Something that can compute the base deposit that is collected upon `register`. +/// +/// A blanket impl allows for any `Get` to be used as-is, which will always return the said balance +/// as deposit. +pub trait CalculateBaseDeposit { + fn calculate_base_deposit(existing_submitters: usize) -> Balance; +} + +impl> CalculateBaseDeposit for G { + fn calculate_base_deposit(_existing_submitters: usize) -> Balance { + G::get() + } +} + +/// Something that can calculate the deposit per-page upon `submit`. +/// +/// A blanket impl allows for any `Get` to be used as-is, which will always return the said balance +/// as deposit **per page**. +pub trait CalculatePageDeposit { + fn calculate_page_deposit(existing_submitters: usize, page_size: usize) -> Balance; +} + +impl + Saturating, G: Get> CalculatePageDeposit for G { + fn calculate_page_deposit(_existing_submitters: usize, page_size: usize) -> Balance { + let page_size: Balance = (page_size as u32).into(); + G::get().saturating_mul(page_size) + } +} + +#[frame_support::pallet] +pub mod pallet { + use super::{WeightInfo, *}; + + #[pallet::config] + #[pallet::disable_frame_system_supertrait_check] + pub trait Config: crate::Config { + /// Handler to the currency. 
+ type Currency: Inspect + + Mutate + + MutateHold; + + /// Base deposit amount for a submission. + type DepositBase: CalculateBaseDeposit>; + + /// Extra deposit per-page. + type DepositPerPage: CalculatePageDeposit>; + + /// Base reward that is given to the winner. + type RewardBase: Get>; + + /// Maximum number of submissions. This, combined with `SignedValidationPhase` and `Pages` + /// dictates how many signed solutions we can verify. + type MaxSubmissions: Get; + + /// The ratio of the deposit to return in case a signed account submits a solution via + /// [`Pallet::register`], but later calls [`Pallet::bail`]. + /// + /// This should be large enough to cover for the deletion cost of possible all pages. To be + /// safe, you can put it to 100% to begin with to fully dis-incentivize bailing. + type BailoutGraceRatio: Get; + + /// The ratio of the deposit to return in case a signed account is ejected from the queue. + /// + /// This value is assumed to be 100% for accounts that are in the invulnerable list, + /// which can only be set by governance. + type EjectGraceRatio: Get; + + /// Handler to estimate the fee of a call. Useful to refund the transaction fee of the + /// submitter for the winner. + type EstimateCallFee: EstimateCallFee, BalanceOf>; + + /// Overarching hold reason. + type RuntimeHoldReason: From; + + /// Provided weights of this pallet. + type WeightInfo: WeightInfo; + } + + /// The hold reason of this palelt. + #[pallet::composite_enum] + pub enum HoldReason { + /// Because of submitting a signed solution. + #[codec(index = 0)] + SignedSubmission, + } + + /// Wrapper type for signed submissions. + /// + /// It handles 3 storage items: + /// + /// 1. [`SortedScores`]: A flat vector of all submissions' `(submitter_id, claimed_score)`. + /// 2. [`SubmissionStorage`]: Paginated map of of all submissions, keyed by submitter and page. + /// 3. [`SubmissionMetadataStorage`]: Map from submitter to the metadata of their submission. 
+ /// + /// All storage items in this group are mapped, and their first key is the `round` to which they + /// belong to. In essence, we are storing multiple versions of each group. + /// + /// ### Invariants: + /// + /// This storage group is sane, clean, and consistent if the following invariants are held: + /// + /// Among the submissions of each round: + /// - `SortedScores` should never contain duplicate account ids. + /// - For any account id in `SortedScores`, a corresponding value should exist in + /// `SubmissionMetadataStorage` under that account id's key. + /// - And the value of `metadata.score` must be equal to the score stored in + /// `SortedScores`. + /// - And visa versa: for any key existing in `SubmissionMetadataStorage`, an item must exist in + /// `SortedScores`. + /// - For any first key existing in `SubmissionStorage`, a key must exist in + /// `SubmissionMetadataStorage`. + /// - For any first key in `SubmissionStorage`, the number of second keys existing should be the + /// same as the `true` count of `pages` in [`SubmissionMetadata`] (this already implies the + /// former, since it uses the metadata). + /// + /// All mutating functions are only allowed to transition into states where all of the above + /// conditions are met. + /// + /// No particular invariant exists between data that related to different rounds. They are + /// purely independent. + pub(crate) struct Submissions(sp_std::marker::PhantomData); + + #[pallet::storage] + type SortedScores = StorageMap< + _, + Twox64Concat, + u32, + BoundedVec<(T::AccountId, ElectionScore), T::MaxSubmissions>, + ValueQuery, + >; + + /// Triple map from (round, account, page) to a solution page. + #[pallet::storage] + type SubmissionStorage = StorageNMap< + _, + ( + NMapKey, + NMapKey, + NMapKey, + ), + SolutionOf, + OptionQuery, + >; + + /// Map from account to the metadata of their submission. 
+ /// + /// invariant: for any Key1 of type `AccountId` in [`Submissions`], this storage map also has a + /// value. + #[pallet::storage] + type SubmissionMetadataStorage = + StorageDoubleMap<_, Twox64Concat, u32, Twox64Concat, T::AccountId, SubmissionMetadata>; + + impl Submissions { + // -- mutating functions + + /// Generic checked mutation helper. + /// + /// All mutating functions must be fulled through this bad boy. The round at which the + /// mutation happens must be provided + fn mutate_checked R>(_round: u32, mutate: F) -> R { + let result = mutate(); + + #[cfg(debug_assertions)] + { + assert!(Self::sanity_check_round(_round).is_ok()); + assert!(Self::sanity_check_round(_round + 1).is_ok()); + assert!(Self::sanity_check_round(_round.saturating_sub(1)).is_ok()); + } + + result + } + + /// *Fully* **TAKE** (i.e. get and remove) the leader from storage, with all of its + /// associated data. + /// + /// This removes all associated data of the leader from storage, discarding the submission + /// data and score, returning the rest. + pub(crate) fn take_leader_with_data( + round: u32, + ) -> Option<(T::AccountId, SubmissionMetadata)> { + Self::mutate_checked(round, || { + SortedScores::::mutate(round, |sorted| sorted.pop()).and_then( + |(submitter, _score)| { + // NOTE: safe to remove unbounded, as at most `Pages` pages are stored. + let r: MultiRemovalResults = SubmissionStorage::::clear_prefix( + (round, &submitter), + u32::MAX, + None, + ); + debug_assert!(r.unique <= T::Pages::get()); + + SubmissionMetadataStorage::::take(round, &submitter) + .map(|metadata| (submitter, metadata)) + }, + ) + }) + } + + /// *Fully* **TAKE** (i.e. get and remove) a submission from storage, with all of its + /// associated data. + /// + /// This removes all associated data of the submitter from storage, discarding the + /// submission data and score, returning the metadata. 
+ pub(crate) fn take_submission_with_data( + round: u32, + who: &T::AccountId, + ) -> Option> { + Self::mutate_checked(round, || { + let mut sorted_scores = SortedScores::::get(round); + if let Some(index) = sorted_scores.iter().position(|(x, _)| x == who) { + sorted_scores.remove(index); + } + if sorted_scores.is_empty() { + SortedScores::::remove(round); + } else { + SortedScores::::insert(round, sorted_scores); + } + + // Note: safe to remove unbounded, as at most `Pages` pages are stored. + let r = SubmissionStorage::::clear_prefix((round, who), u32::MAX, None); + debug_assert!(r.unique <= T::Pages::get()); + + SubmissionMetadataStorage::::take(round, who) + }) + } + + /// Try and register a new solution. + /// + /// Registration can only happen for the current round. + /// + /// registration might fail if the queue is already full, and the solution is not good + /// enough to eject the weakest. + fn try_register( + round: u32, + who: &T::AccountId, + metadata: SubmissionMetadata, + ) -> Result { + Self::mutate_checked(round, || Self::try_register_inner(round, who, metadata)) + } + + fn try_register_inner( + round: u32, + who: &T::AccountId, + metadata: SubmissionMetadata, + ) -> Result { + let mut sorted_scores = SortedScores::::get(round); + + let discarded = if let Some(_) = sorted_scores.iter().position(|(x, _)| x == who) { + return Err(Error::::Duplicate.into()); + } else { + // must be new. + debug_assert!(!SubmissionMetadataStorage::::contains_key(round, who)); + + let pos = match sorted_scores + .binary_search_by_key(&metadata.claimed_score, |(_, y)| *y) + { + // an equal score exists, unlikely, but could very well happen. We just put them + // next to each other. + Ok(pos) => pos, + // new score, should be inserted in this pos. 
+ Err(pos) => pos, + }; + + let record = (who.clone(), metadata.claimed_score); + match sorted_scores.force_insert_keep_right(pos, record) { + Ok(None) => false, + Ok(Some((discarded, _score))) => { + let maybe_metadata = + SubmissionMetadataStorage::::take(round, &discarded).defensive(); + // Note: safe to remove unbounded, as at most `Pages` pages are stored. + let _r = SubmissionStorage::::clear_prefix( + (round, &discarded), + u32::MAX, + None, + ); + debug_assert!(_r.unique <= T::Pages::get()); + + if let Some(metadata) = maybe_metadata { + Pallet::::settle_deposit( + &discarded, + metadata.deposit, + T::EjectGraceRatio::get(), + ); + } + + Pallet::::deposit_event(Event::::Ejected(round, discarded)); + true + }, + Err(_) => return Err(Error::::QueueFull.into()), + } + }; + + SortedScores::::insert(round, sorted_scores); + SubmissionMetadataStorage::::insert(round, who, metadata); + Ok(discarded) + } + + /// Submit a page of `solution` to the `page` index of `who`'s submission. + /// + /// Updates the deposit in the metadata accordingly. + /// + /// - If `maybe_solution` is `None`, then the given page is deleted. + /// - `who` must have already registered their submission. + /// - If the page is duplicate, it will replaced. + pub(crate) fn try_mutate_page( + round: u32, + who: &T::AccountId, + page: PageIndex, + maybe_solution: Option>>, + ) -> DispatchResultWithPostInfo { + Self::mutate_checked(round, || { + Self::try_mutate_page_inner(round, who, page, maybe_solution) + }) + } + + /// Get the deposit of a registration with the given number of pages. 
+ fn deposit_for(pages: usize) -> BalanceOf { + let round = Pallet::::current_round(); + let queue_size = Self::submitters_count(round); + let base = T::DepositBase::calculate_base_deposit(queue_size); + let pages = T::DepositPerPage::calculate_page_deposit(queue_size, pages); + base.saturating_add(pages) + } + + fn try_mutate_page_inner( + round: u32, + who: &T::AccountId, + page: PageIndex, + maybe_solution: Option>>, + ) -> DispatchResultWithPostInfo { + let mut metadata = + SubmissionMetadataStorage::::get(round, who).ok_or(Error::::NotRegistered)?; + ensure!(page < T::Pages::get(), Error::::BadPageIndex); + + // defensive only: we resize `meta.pages` once to be `T::Pages` elements once, and never + // resize it again; `page` is checked here to be in bound; element must exist; qed. + if let Some(page_bit) = metadata.pages.get_mut(page as usize).defensive() { + *page_bit = maybe_solution.is_some(); + } + + // update deposit. + let new_pages = metadata.pages.iter().filter(|x| **x).count(); + let new_deposit = Self::deposit_for(new_pages); + let old_deposit = metadata.deposit; + if new_deposit > old_deposit { + let to_reserve = new_deposit - old_deposit; + T::Currency::hold(&HoldReason::SignedSubmission.into(), who, to_reserve)?; + } else { + let to_unreserve = old_deposit - new_deposit; + let _res = T::Currency::release( + &HoldReason::SignedSubmission.into(), + who, + to_unreserve, + Precision::BestEffort, + ); + debug_assert_eq!(_res, Ok(to_unreserve)); + }; + metadata.deposit = new_deposit; + + // If a page is being added, we record the fee as well. For removals, we ignore the fee + // as it is negligible, and we don't want to encourage anyone to submit and remove + // anyways. Note that fee is only refunded for the winner anyways. 
+ if maybe_solution.is_some() { + let fee = T::EstimateCallFee::estimate_call_fee( + &Call::submit_page { page, maybe_solution: maybe_solution.clone() }, + None.into(), + ); + metadata.fee.saturating_accrue(fee); + } + + SubmissionStorage::::mutate_exists((round, who, page), |maybe_old_solution| { + *maybe_old_solution = maybe_solution.map(|s| *s) + }); + SubmissionMetadataStorage::::insert(round, who, metadata); + Ok(().into()) + } + + // -- getter functions + pub(crate) fn has_leader(round: u32) -> bool { + !SortedScores::::get(round).is_empty() + } + + pub(crate) fn leader(round: u32) -> Option<(T::AccountId, ElectionScore)> { + SortedScores::::get(round).last().cloned() + } + + pub(crate) fn submitters_count(round: u32) -> usize { + SortedScores::::get(round).len() + } + + pub(crate) fn get_page_of( + round: u32, + who: &T::AccountId, + page: PageIndex, + ) -> Option> { + SubmissionStorage::::get((round, who, &page)) + } + } + + #[allow(unused)] + #[cfg(any(feature = "try-runtime", test, feature = "runtime-benchmarks", debug_assertions))] + impl Submissions { + pub(crate) fn sorted_submitters(round: u32) -> BoundedVec { + use frame_support::traits::TryCollect; + SortedScores::::get(round).into_iter().map(|(x, _)| x).try_collect().unwrap() + } + + pub fn submissions_iter( + round: u32, + ) -> impl Iterator)> { + SubmissionStorage::::iter_prefix((round,)).map(|((x, y), z)| (x, y, z)) + } + + pub fn metadata_iter( + round: u32, + ) -> impl Iterator)> { + SubmissionMetadataStorage::::iter_prefix(round) + } + + pub fn metadata_of(round: u32, who: T::AccountId) -> Option> { + SubmissionMetadataStorage::::get(round, who) + } + + pub fn pages_of( + round: u32, + who: T::AccountId, + ) -> impl Iterator)> { + SubmissionStorage::::iter_prefix((round, who)) + } + + pub fn leaderboard( + round: u32, + ) -> BoundedVec<(T::AccountId, ElectionScore), T::MaxSubmissions> { + SortedScores::::get(round) + } + + /// Ensure that all the storage items associated with the given round 
are in `killed` state, + /// meaning that in the expect state after an election is OVER. + pub(crate) fn ensure_killed(round: u32) -> DispatchResult { + ensure!(Self::metadata_iter(round).count() == 0, "metadata_iter not cleared."); + ensure!(Self::submissions_iter(round).count() == 0, "submissions_iter not cleared."); + ensure!(Self::sorted_submitters(round).len() == 0, "sorted_submitters not cleared."); + + Ok(()) + } + + /// Ensure that no data associated with `who` exists for `round`. + pub(crate) fn ensure_killed_with(who: &T::AccountId, round: u32) -> DispatchResult { + ensure!( + SubmissionMetadataStorage::::get(round, who).is_none(), + "metadata not cleared." + ); + ensure!( + SubmissionStorage::::iter_prefix((round, who)).count() == 0, + "submissions not cleared." + ); + ensure!( + SortedScores::::get(round).iter().all(|(x, _)| x != who), + "sorted_submitters not cleared." + ); + + Ok(()) + } + + /// Perform all the sanity checks of this storage item group at the given round. + pub(crate) fn sanity_check_round(round: u32) -> DispatchResult { + use sp_std::collections::btree_set::BTreeSet; + let sorted_scores = SortedScores::::get(round); + assert_eq!( + sorted_scores.clone().into_iter().map(|(x, _)| x).collect::>().len(), + sorted_scores.len() + ); + + let _ = SubmissionMetadataStorage::::iter_prefix(round) + .map(|(submitter, meta)| { + let mut matches = SortedScores::::get(round) + .into_iter() + .filter(|(who, _score)| who == &submitter) + .collect::>(); + + ensure!( + matches.len() == 1, + "item existing in metadata but missing in sorted list.", + ); + + let (_, score) = matches.pop().expect("checked; qed"); + ensure!(score == meta.claimed_score, "score mismatch"); + Ok(()) + }) + .collect::, &'static str>>()?; + + ensure!( + SubmissionStorage::::iter_key_prefix((round,)).map(|(k1, _k2)| k1).all( + |submitter| SubmissionMetadataStorage::::contains_key(round, submitter) + ), + "missing metadata of submitter" + ); + + for submitter in 
SubmissionStorage::::iter_key_prefix((round,)).map(|(k1, _k2)| k1) { + let pages_count = + SubmissionStorage::::iter_key_prefix((round, &submitter)).count(); + let metadata = SubmissionMetadataStorage::::get(round, submitter) + .expect("metadata checked to exist for all keys; qed"); + let assumed_pages_count = metadata.pages.iter().filter(|x| **x).count(); + ensure!(pages_count == assumed_pages_count, "wrong page count"); + } + + Ok(()) + } + } + + #[pallet::pallet] + pub struct Pallet(PhantomData); + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// Upcoming submission has been registered for the given account, with the given score. + Registered(u32, T::AccountId, ElectionScore), + /// A page of solution solution with the given index has been stored for the given account. + Stored(u32, T::AccountId, PageIndex), + /// The given account has been rewarded with the given amount. + Rewarded(u32, T::AccountId, BalanceOf), + /// The given account has been slashed with the given amount. + Slashed(u32, T::AccountId, BalanceOf), + /// The given solution, for the given round, was ejected. + Ejected(u32, T::AccountId), + /// The given account has been discarded. + Discarded(u32, T::AccountId), + /// The given account has bailed. + Bailed(u32, T::AccountId), + } + + #[pallet::error] + pub enum Error { + /// The phase is not signed. + PhaseNotSigned, + /// The submission is a duplicate. + Duplicate, + /// The queue is full. + QueueFull, + /// The page index is out of bounds. + BadPageIndex, + /// The account is not registered. + NotRegistered, + /// No submission found. + NoSubmission, + /// Round is not yet over. + RoundNotOver, + /// Bad witness data provided. + BadWitnessData, + } + + #[pallet::call] + impl Pallet { + /// Register oneself for an upcoming signed election. 
+ #[pallet::weight(SignedWeightsOf::::register_eject())] + #[pallet::call_index(0)] + pub fn register( + origin: OriginFor, + claimed_score: ElectionScore, + ) -> DispatchResultWithPostInfo { + let who = ensure_signed(origin)?; + ensure!(crate::Pallet::::current_phase().is_signed(), Error::::PhaseNotSigned); + + // note: we could already check if this is a duplicate here, but prefer keeping the code + // simple for now. + + let deposit = Submissions::::deposit_for(0); + let reward = T::RewardBase::get(); + let fee = T::EstimateCallFee::estimate_call_fee( + &Call::register { claimed_score }, + None.into(), + ); + let mut pages = BoundedVec::<_, _>::with_bounded_capacity(T::Pages::get() as usize); + pages.bounded_resize(T::Pages::get() as usize, false); + + let new_metadata = SubmissionMetadata { claimed_score, deposit, reward, fee, pages }; + + T::Currency::hold(&HoldReason::SignedSubmission.into(), &who, deposit)?; + let round = Self::current_round(); + let discarded = Submissions::::try_register(round, &who, new_metadata)?; + Self::deposit_event(Event::::Registered(round, who, claimed_score)); + + // maybe refund. + if discarded { + Ok(().into()) + } else { + Ok(Some(SignedWeightsOf::::register_not_full()).into()) + } + } + + /// Submit a single page of a solution. + /// + /// Must always come after [`Pallet::register`]. + /// + /// `maybe_solution` can be set to `None` to erase the page. + /// + /// Collects deposits from the signed origin based on [`Config::DepositBase`] and + /// [`Config::DepositPerPage`]. 
+ #[pallet::weight(SignedWeightsOf::::submit_page())] + #[pallet::call_index(1)] + pub fn submit_page( + origin: OriginFor, + page: PageIndex, + maybe_solution: Option>>, + ) -> DispatchResultWithPostInfo { + let who = ensure_signed(origin)?; + ensure!(crate::Pallet::::current_phase().is_signed(), Error::::PhaseNotSigned); + let is_set = maybe_solution.is_some(); + + let round = Self::current_round(); + Submissions::::try_mutate_page(round, &who, page, maybe_solution)?; + Self::deposit_event(Event::::Stored(round, who, page)); + + // maybe refund. + if is_set { + Ok(().into()) + } else { + Ok(Some(SignedWeightsOf::::unset_page()).into()) + } + } + + /// Retract a submission. + /// + /// A portion of the deposit may be returned, based on the [`Config::BailoutGraceRatio`]. + /// + /// This will fully remove the solution from storage. + #[pallet::weight(SignedWeightsOf::::bail())] + #[pallet::call_index(2)] + pub fn bail(origin: OriginFor) -> DispatchResultWithPostInfo { + let who = ensure_signed(origin)?; + ensure!(crate::Pallet::::current_phase().is_signed(), Error::::PhaseNotSigned); + let round = Self::current_round(); + let metadata = Submissions::::take_submission_with_data(round, &who) + .ok_or(Error::::NoSubmission)?; + + let deposit = metadata.deposit; + Self::settle_deposit(&who, deposit, T::BailoutGraceRatio::get()); + Self::deposit_event(Event::::Bailed(round, who)); + + Ok(None.into()) + } + + /// Clear the data of a submitter form an old round. + /// + /// The dispatch origin of this call must be signed, and the original submitter. + /// + /// This can only be called for submissions that end up being discarded, as in they are not + /// processed and they end up lingering in the queue. 
+ #[pallet::call_index(3)] + #[pallet::weight(SignedWeightsOf::::clear_old_round_data(*witness_pages))] + pub fn clear_old_round_data( + origin: OriginFor, + round: u32, + witness_pages: u32, + ) -> DispatchResultWithPostInfo { + let discarded = ensure_signed(origin)?; + + let current_round = Self::current_round(); + // we can only operate on old rounds. + ensure!(round < current_round, Error::::RoundNotOver); + + let metadata = Submissions::::take_submission_with_data(round, &discarded) + .ok_or(Error::::NoSubmission)?; + ensure!( + metadata.pages.iter().filter(|p| **p).count() as u32 <= witness_pages, + Error::::BadWitnessData + ); + + // give back their deposit. + let _res = T::Currency::release( + &HoldReason::SignedSubmission.into(), + &discarded, + metadata.deposit, + Precision::BestEffort, + ); + debug_assert_eq!(_res, Ok(metadata.deposit)); + Self::deposit_event(Event::::Discarded(current_round, discarded)); + + // IFF all good, this is free of charge. + Ok(None.into()) + } + } + + #[pallet::hooks] + impl Hooks> for Pallet { + fn on_initialize(_: BlockNumberFor) -> Weight { + // this code is only called when at the boundary of phase transition, which is already + // captured by the parent pallet. No need for weight. + let weight_taken_into_account: Weight = Default::default(); + + if crate::Pallet::::current_phase().is_signed_validation_opened_now() { + let maybe_leader = Submissions::::leader(Self::current_round()); + sublog!( + info, + "signed", + "signed validation started, sending validation start signal? {:?}", + maybe_leader.is_some() + ); + + // start an attempt to verify our best thing. + if maybe_leader.is_some() { + // defensive: signed phase has just began, verifier should be in a clear state + // and ready to accept a solution. + let _ = ::start().defensive(); + } + } + + if crate::Pallet::::current_phase().is_unsigned_opened_now() { + // signed validation phase just ended, make sure you stop any ongoing operation. 
+ sublog!(info, "signed", "signed validation ended, sending validation stop signal",); + ::stop(); + } + + weight_taken_into_account + } + + #[cfg(feature = "try-runtime")] + fn try_state(n: BlockNumberFor) -> Result<(), sp_runtime::TryRuntimeError> { + Self::do_try_state(n) + } + } +} + +impl Pallet { + #[cfg(any(feature = "try-runtime", test, feature = "runtime-benchmarks"))] + pub(crate) fn do_try_state(_n: BlockNumberFor) -> Result<(), sp_runtime::TryRuntimeError> { + Submissions::::sanity_check_round(Self::current_round()) + } + + fn current_round() -> u32 { + crate::Pallet::::round() + } + + fn settle_deposit(who: &T::AccountId, deposit: BalanceOf, grace: Perbill) { + let to_refund = grace * deposit; + let to_slash = deposit.defensive_saturating_sub(to_refund); + + let _res = T::Currency::release( + &HoldReason::SignedSubmission.into(), + who, + to_refund, + Precision::BestEffort, + ) + .defensive(); + debug_assert_eq!(_res, Ok(to_refund)); + + let _res = T::Currency::burn_held( + &HoldReason::SignedSubmission.into(), + who, + to_slash, + Precision::BestEffort, + Fortitude::Force, + ) + .defensive(); + debug_assert_eq!(_res, Ok(to_slash)); + } +} diff --git a/substrate/frame/election-provider-multi-block/src/signed/tests.rs b/substrate/frame/election-provider-multi-block/src/signed/tests.rs new file mode 100644 index 0000000000000..271dca34c7fde --- /dev/null +++ b/substrate/frame/election-provider-multi-block/src/signed/tests.rs @@ -0,0 +1,636 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::{Event as SignedEvent, *}; +use crate::{mock::*, verifier::FeasibilityError}; +use sp_core::bounded_vec; + +pub type T = Runtime; + +mod calls { + use super::*; + use sp_runtime::{DispatchError, TokenError::FundsUnavailable}; + + #[test] + fn cannot_register_with_insufficient_balance() { + ExtBuilder::signed().build_and_execute(|| { + roll_to_signed_open(); + // 777 is not funded. + assert_noop!( + SignedPallet::register(RuntimeOrigin::signed(777), Default::default()), + DispatchError::Token(FundsUnavailable) + ); + }); + + ExtBuilder::signed().build_and_execute(|| { + roll_to_signed_open(); + // 99 is funded but deposit is too high. 
+ assert_eq!(balances(99), (100, 0)); + SignedDepositBase::set(101); + assert_noop!( + SignedPallet::register(RuntimeOrigin::signed(99), Default::default()), + DispatchError::Token(FundsUnavailable) + ); + }) + } + + #[test] + fn cannot_register_if_not_signed() { + ExtBuilder::signed().build_and_execute(|| { + assert!(!crate::Pallet::::current_phase().is_signed()); + assert_noop!( + SignedPallet::register(RuntimeOrigin::signed(99), Default::default()), + Error::::PhaseNotSigned + ); + }) + } + + #[test] + fn register_metadata_works() { + ExtBuilder::signed().build_and_execute(|| { + roll_to_signed_open(); + assert_full_snapshot(); + + assert_eq!(balances(99), (100, 0)); + let score = ElectionScore { minimal_stake: 100, ..Default::default() }; + + assert_ok!(SignedPallet::register(RuntimeOrigin::signed(99), score)); + assert_eq!(balances(99), (95, 5)); + + assert_eq!(Submissions::::metadata_iter(1).count(), 0); + assert_eq!(Submissions::::metadata_iter(0).count(), 1); + assert_eq!( + Submissions::::metadata_of(0, 99).unwrap(), + SubmissionMetadata { + claimed_score: score, + deposit: 5, + fee: 1, + pages: bounded_vec![false, false, false], + reward: 3 + } + ); + assert_eq!( + *Submissions::::leaderboard(0), + vec![(99, ElectionScore { minimal_stake: 100, ..Default::default() })] + ); + assert!(matches!(signed_events().as_slice(), &[ + SignedEvent::Registered(_, x, _), + ] if x == 99)); + + // second ones submits + assert_eq!(balances(999), (100, 0)); + let score = ElectionScore { minimal_stake: 90, ..Default::default() }; + assert_ok!(SignedPallet::register(RuntimeOrigin::signed(999), score)); + assert_eq!(balances(999), (95, 5)); + + assert_eq!( + Submissions::::metadata_of(0, 999).unwrap(), + SubmissionMetadata { + claimed_score: score, + deposit: 5, + fee: 1, + pages: bounded_vec![false, false, false], + reward: 3 + } + ); + assert!(matches!(signed_events().as_slice(), &[ + SignedEvent::Registered(..), + SignedEvent::Registered(_, x, _), + ] if x == 999)); + + 
assert_eq!( + *Submissions::::leaderboard(0), + vec![ + (999, ElectionScore { minimal_stake: 90, ..Default::default() }), + (99, ElectionScore { minimal_stake: 100, ..Default::default() }) + ] + ); + assert_eq!(Submissions::::metadata_iter(1).count(), 0); + assert_eq!(Submissions::::metadata_iter(0).count(), 2); + + // submit again with a new score. + assert_noop!( + SignedPallet::register( + RuntimeOrigin::signed(999), + ElectionScore { minimal_stake: 80, ..Default::default() } + ), + Error::::Duplicate, + ); + }) + } + + #[test] + fn page_submission_accumulates_fee() { + ExtBuilder::signed().build_and_execute(|| { + roll_to_signed_open(); + assert_full_snapshot(); + + let score = ElectionScore { minimal_stake: 100, ..Default::default() }; + assert_ok!(SignedPallet::register(RuntimeOrigin::signed(99), score)); + + // fee for register is recorded. + assert_eq!( + Submissions::::metadata_of(0, 99).unwrap(), + SubmissionMetadata { + claimed_score: score, + deposit: 5, + fee: 1, + pages: bounded_vec![false, false, false], + reward: 3 + } + ); + + // fee for page submission is recorded. + assert_ok!(SignedPallet::submit_page( + RuntimeOrigin::signed(99), + 0, + Some(Default::default()) + )); + assert_eq!( + Submissions::::metadata_of(0, 99).unwrap(), + SubmissionMetadata { + claimed_score: score, + deposit: 6, + fee: 2, + pages: bounded_vec![true, false, false], + reward: 3 + } + ); + + // another fee for page submission is recorded. 
+ assert_ok!(SignedPallet::submit_page( + RuntimeOrigin::signed(99), + 1, + Some(Default::default()) + )); + assert_eq!( + Submissions::::metadata_of(0, 99).unwrap(), + SubmissionMetadata { + claimed_score: score, + deposit: 7, + fee: 3, + pages: bounded_vec![true, true, false], + reward: 3 + } + ); + + // removal updates deposit but not the fee + assert_ok!(SignedPallet::submit_page(RuntimeOrigin::signed(99), 1, None)); + + assert_eq!( + Submissions::::metadata_of(0, 99).unwrap(), + SubmissionMetadata { + claimed_score: score, + deposit: 6, + fee: 3, + pages: bounded_vec![true, false, false], + reward: 3 + } + ); + }); + } + + #[test] + fn metadata_submission_sorted_based_on_stake() { + ExtBuilder::signed().build_and_execute(|| { + roll_to_signed_open(); + assert_full_snapshot(); + + let score_from = |x| ElectionScore { minimal_stake: x, ..Default::default() }; + + assert_ok!(SignedPallet::register(RuntimeOrigin::signed(91), score_from(100))); + assert_eq!(*Submissions::::leaderboard(0), vec![(91, score_from(100))]); + assert_eq!(balances(91), (95, 5)); + assert!(matches!(signed_events().as_slice(), &[SignedEvent::Registered(_, 91, _)])); + + // weaker one comes while we have space. + assert_ok!(SignedPallet::register(RuntimeOrigin::signed(92), score_from(90))); + assert_eq!( + *Submissions::::leaderboard(0), + vec![(92, score_from(90)), (91, score_from(100))] + ); + assert_eq!(balances(92), (95, 5)); + assert!(matches!( + signed_events().as_slice(), + &[SignedEvent::Registered(..), SignedEvent::Registered(_, 92, _),] + )); + + // stronger one comes while we have space. 
+ assert_ok!(SignedPallet::register(RuntimeOrigin::signed(93), score_from(110))); + assert_eq!( + *Submissions::::leaderboard(0), + vec![(92, score_from(90)), (91, score_from(100)), (93, score_from(110))] + ); + assert_eq!(balances(93), (95, 5)); + assert!(matches!( + signed_events().as_slice(), + &[ + SignedEvent::Registered(..), + SignedEvent::Registered(..), + SignedEvent::Registered(_, 93, _), + ] + )); + + // weaker one comes while we don't have space. + assert_noop!( + SignedPallet::register(RuntimeOrigin::signed(94), score_from(80)), + Error::::QueueFull + ); + assert_eq!( + *Submissions::::leaderboard(0), + vec![(92, score_from(90)), (91, score_from(100)), (93, score_from(110))] + ); + assert_eq!(balances(94), (100, 0)); + // no event has been emitted this time. + assert!(matches!( + signed_events().as_slice(), + &[ + SignedEvent::Registered(..), + SignedEvent::Registered(..), + SignedEvent::Registered(..), + ] + )); + + // stronger one comes while we don't have space. Eject the weakest + assert_ok!(SignedPallet::register(RuntimeOrigin::signed(94), score_from(120))); + assert_eq!( + *Submissions::::leaderboard(0), + vec![(91, score_from(100)), (93, score_from(110)), (94, score_from(120))] + ); + assert!(matches!( + signed_events().as_slice(), + &[ + SignedEvent::Registered(..), + SignedEvent::Registered(..), + SignedEvent::Registered(..), + SignedEvent::Ejected(_, 92), + SignedEvent::Registered(_, 94, _), + ] + )); + assert_eq!(balances(94), (95, 5)); + // 92 is ejected, 1 unit of deposit is refunded, 4 units are slashed. + // see the default `EjectGraceRatio`. + assert_eq!(balances(92), (96, 0)); + + // another stronger one comes, only replace the weakest. 
+ assert_ok!(SignedPallet::register(RuntimeOrigin::signed(95), score_from(105))); + assert_eq!( + *Submissions::::leaderboard(0), + vec![(95, score_from(105)), (93, score_from(110)), (94, score_from(120))] + ); + assert_eq!(balances(95), (95, 5)); + // 91 is ejected, they get only a part of the deposit back. + assert_eq!(balances(91), (96, 0)); + assert!(matches!( + signed_events().as_slice(), + &[ + SignedEvent::Registered(..), + SignedEvent::Registered(..), + SignedEvent::Registered(..), + SignedEvent::Ejected(..), + SignedEvent::Registered(..), + SignedEvent::Ejected(_, 91), + SignedEvent::Registered(_, 95, _), + ] + )); + }) + } + + #[test] + fn can_bail_at_a_cost() { + ExtBuilder::signed().build_and_execute(|| { + roll_to_signed_open(); + assert_full_snapshot(); + + let score = ElectionScore { minimal_stake: 100, ..Default::default() }; + assert_ok!(SignedPallet::register(RuntimeOrigin::signed(99), score)); + assert_eq!(balances(99), (95, 5)); + + // not submitted, cannot bailout. + assert_noop!(SignedPallet::bail(RuntimeOrigin::signed(999)), Error::::NoSubmission); + + // can bail. + assert_ok!(SignedPallet::bail(RuntimeOrigin::signed(99))); + // 20% of the deposit returned, which is 1, 4 is slashed. + assert_eq!(balances(99), (96, 0)); + assert_no_data_for(0, 99); + + assert_eq!( + signed_events(), + vec![Event::Registered(0, 99, score), Event::Bailed(0, 99)] + ); + }); + } + + #[test] + fn can_submit_pages() { + ExtBuilder::signed().build_and_execute(|| { + roll_to_signed_open(); + assert_full_snapshot(); + + assert_noop!( + SignedPallet::submit_page(RuntimeOrigin::signed(99), 0, Default::default()), + Error::::NotRegistered + ); + + assert_ok!(SignedPallet::register( + RuntimeOrigin::signed(99), + ElectionScore { minimal_stake: 100, ..Default::default() } + )); + + assert_eq!(Submissions::::pages_of(0, 99).count(), 0); + assert_eq!(balances(99), (95, 5)); + + // indices 0, 1, 2 are valid. 
+ assert_noop!( + SignedPallet::submit_page(RuntimeOrigin::signed(99), 3, Default::default()), + Error::::BadPageIndex + ); + + // add the first page. + assert_ok!(SignedPallet::submit_page( + RuntimeOrigin::signed(99), + 0, + Some(Default::default()) + )); + assert_eq!(Submissions::::pages_of(0, 99).count(), 1); + assert_eq!(balances(99), (94, 6)); + assert_eq!( + Submissions::::metadata_of(0, 99).unwrap().pages.into_inner(), + vec![true, false, false] + ); + + // replace it again, nada. + assert_ok!(SignedPallet::submit_page( + RuntimeOrigin::signed(99), + 0, + Some(Default::default()) + )); + assert_eq!(Submissions::::pages_of(0, 99).count(), 1); + assert_eq!(balances(99), (94, 6)); + + // add a new one. + assert_ok!(SignedPallet::submit_page( + RuntimeOrigin::signed(99), + 1, + Some(Default::default()) + )); + assert_eq!(Submissions::::pages_of(0, 99).count(), 2); + assert_eq!(balances(99), (93, 7)); + assert_eq!( + Submissions::::metadata_of(0, 99).unwrap().pages.into_inner(), + vec![true, true, false] + ); + + // remove one, deposit is back. + assert_ok!(SignedPallet::submit_page(RuntimeOrigin::signed(99), 0, None)); + assert_eq!(Submissions::::pages_of(0, 99).count(), 1); + assert_eq!(balances(99), (94, 6)); + assert_eq!( + Submissions::::metadata_of(0, 99).unwrap().pages.into_inner(), + vec![false, true, false] + ); + + assert!(matches!( + signed_events().as_slice(), + &[ + SignedEvent::Registered(..), + SignedEvent::Stored(.., 0), + SignedEvent::Stored(.., 0), + SignedEvent::Stored(.., 1), + SignedEvent::Stored(.., 0), + ] + )); + }); + } +} + +mod e2e { + use super::*; + #[test] + fn good_bad_evil() { + // an extensive scenario: 3 solutions submitted, one rewarded, one slashed, and one + // discarded. + ExtBuilder::signed().build_and_execute(|| { + roll_to_signed_open(); + assert_full_snapshot(); + + // an invalid, but weak solution. 
+ { + let score = + ElectionScore { minimal_stake: 10, sum_stake: 10, sum_stake_squared: 100 }; + assert_ok!(SignedPallet::register(RuntimeOrigin::signed(99), score)); + assert_ok!(SignedPallet::submit_page( + RuntimeOrigin::signed(99), + 0, + Some(Default::default()) + )); + + assert_eq!(balances(99), (94, 6)); + } + + // a valid, strong solution. + let strong_score = { + let paged = mine_full_solution().unwrap(); + load_signed_for_verification(999, paged.clone()); + assert_eq!(balances(999), (92, 8)); + paged.score + }; + + // an invalid, strong solution. + { + let mut score = strong_score; + score.minimal_stake *= 2; + assert_ok!(SignedPallet::register(RuntimeOrigin::signed(92), score)); + assert_eq!(balances(92), (95, 5)); + // we don't even bother to submit a page.. + } + + assert_eq!( + Submissions::::leaderboard(0) + .into_iter() + .map(|(x, _)| x) + .collect::>(), + vec![99, 999, 92] + ); + + assert_eq!( + Submissions::::metadata_iter(0).collect::>(), + vec![ + ( + 92, + SubmissionMetadata { + deposit: 5, + fee: 1, + reward: 3, + claimed_score: ElectionScore { + minimal_stake: 110, + sum_stake: 130, + sum_stake_squared: 8650 + }, + pages: bounded_vec![false, false, false] + } + ), + ( + 999, + SubmissionMetadata { + deposit: 8, + fee: 4, + reward: 3, + claimed_score: ElectionScore { + minimal_stake: 55, + sum_stake: 130, + sum_stake_squared: 8650 + }, + pages: bounded_vec![true, true, true] + } + ), + ( + 99, + SubmissionMetadata { + deposit: 6, + fee: 2, + reward: 3, + claimed_score: ElectionScore { + minimal_stake: 10, + sum_stake: 10, + sum_stake_squared: 100 + }, + pages: bounded_vec![true, false, false] + } + ) + ] + ); + + roll_to_signed_validation_open(); + + // 92 is slashed in 3 blocks, 999 becomes rewarded in 3 blocks, and 99 is discarded. 
+ roll_next(); + roll_next(); + roll_next(); + + assert_eq!( + Submissions::::leaderboard(0) + .into_iter() + .map(|(x, _)| x) + .collect::>(), + vec![99, 999] + ); + + roll_next(); + roll_next(); + roll_next(); + + assert_eq!( + singed_events_since_last_call(), + vec![ + Event::Registered( + 0, + 99, + ElectionScore { minimal_stake: 10, sum_stake: 10, sum_stake_squared: 100 } + ), + Event::Stored(0, 99, 0), + Event::Registered( + 0, + 999, + ElectionScore { + minimal_stake: 55, + sum_stake: 130, + sum_stake_squared: 8650 + } + ), + Event::Stored(0, 999, 0), + Event::Stored(0, 999, 1), + Event::Stored(0, 999, 2), + Event::Registered( + 0, + 92, + ElectionScore { + minimal_stake: 110, + sum_stake: 130, + sum_stake_squared: 8650 + } + ), + Event::Slashed(0, 92, 5), + Event::Rewarded(0, 999, 7), + ] + ); + + assert_eq!( + verifier_events(), + vec![ + crate::verifier::Event::Verified(2, 0), + crate::verifier::Event::Verified(1, 0), + crate::verifier::Event::Verified(0, 0), + crate::verifier::Event::VerificationFailed(0, FeasibilityError::InvalidScore), + crate::verifier::Event::Verified(2, 2), + crate::verifier::Event::Verified(1, 2), + crate::verifier::Event::Verified(0, 2), + crate::verifier::Event::Queued( + ElectionScore { + minimal_stake: 55, + sum_stake: 130, + sum_stake_squared: 8650 + }, + None + ) + ] + ); + + // 99 is discarded -- for now they have some deposit collected, which they have to + // manually collect next. + assert_eq!(balances(99), (94, 6)); + // 999 has gotten their deposit back, plus fee and reward back. + assert_eq!(balances(999), (107, 0)); + // 92 loses a part of their deposit for being ejected. + assert_eq!(balances(92), (95, 0)); + + // the data associated with 999 is already removed. + assert_ok!(Submissions::::ensure_killed_with(&999, 0)); + // the data associated with 92 is already removed. 
+ assert_ok!(Submissions::::ensure_killed_with(&92, 0)); + // but not for 99 + assert!(Submissions::::ensure_killed_with(&99, 0).is_err()); + + // we cannot cleanup just yet. + assert_noop!( + SignedPallet::clear_old_round_data(RuntimeOrigin::signed(99), 0, Pages::get()), + Error::::RoundNotOver + ); + + MultiBlock::rotate_round(); + + // now we can delete our stuff. + assert_ok!(SignedPallet::clear_old_round_data( + RuntimeOrigin::signed(99), + 0, + Pages::get() + )); + // our stuff is gone. + assert_ok!(Submissions::::ensure_killed_with(&99, 0)); + + // check events. + assert_eq!(singed_events_since_last_call(), vec![Event::Discarded(1, 99)]); + + // 99 now has their deposit returned. + assert_eq!(balances(99), (100, 0)); + + // signed pallet should be in 100% clean state. + assert_ok!(Submissions::::ensure_killed(0)); + }) + } +} diff --git a/substrate/frame/election-provider-multi-block/src/types.rs b/substrate/frame/election-provider-multi-block/src/types.rs new file mode 100644 index 0000000000000..53215c1f27de4 --- /dev/null +++ b/substrate/frame/election-provider-multi-block/src/types.rs @@ -0,0 +1,424 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Common types and traits of the EPMB pallet group. +//! +//! ## [`SolutionOf`] +//! +//! This type is among the most cryptic used in the EPMB pallet. 
The origins of this type go back to +//! the fact that sending a solution, with hundreds or thousands of account-ids in it would be too +//! large for a chain to handle. This was particularly the case in a single page solution, as +//! developed in `election-provider-multi-phase`. To combat this, a "compact" custom type is +//! generated to encapsulate a solution. This type is generated by +//! [`frame_election_provider_support::generate_solution_type`]. See the documentation of this macro +//! for more information about the hacks used to reduce the size of the solution. +//! +//! Consequently, the [`SolutionVoterIndexOf`] and [`SolutionTargetIndexOf`] and +//! [`SolutionAccuracyOf`] are derived from this type. +//! +//! ## [`Phase`] +//! +//! This is the most important type of this pallet, demonstrating the state-machine used +//! to manage the election process and its various phases. + +use crate::unsigned::miner::MinerConfig; +use codec::{Decode, DecodeWithMemTracking, Encode, MaxEncodedLen}; +use frame_election_provider_support::ElectionProvider; +pub use frame_election_provider_support::{NposSolution, PageIndex}; +use frame_support::{ + traits::DefensiveSaturating, BoundedVec, CloneNoBound, DebugNoBound, DefaultNoBound, EqNoBound, + PartialEqNoBound, +}; +use frame_system::pallet_prelude::BlockNumberFor; +use scale_info::TypeInfo; +use sp_core::Get; +pub use sp_npos_elections::{ElectionResult, ElectionScore}; +use sp_runtime::{ + traits::{CheckedSub, One, Zero}, + SaturatedConversion, Saturating, +}; +use sp_std::{collections::btree_set::BTreeSet, fmt::Debug, prelude::*}; + +/// The solution type used by this crate. +pub type SolutionOf = ::Solution; +/// The voter index. Derived from [`SolutionOf`]. +pub type SolutionVoterIndexOf = as NposSolution>::VoterIndex; +/// The target index. Derived from [`SolutionOf`]. +pub type SolutionTargetIndexOf = as NposSolution>::TargetIndex; +/// The accuracy of the election, when submitted from offchain. 
Derived from [`SolutionOf`]. +pub type SolutionAccuracyOf = as NposSolution>::Accuracy; +/// The fallback election type. +pub type FallbackErrorOf = <::Fallback as ElectionProvider>::Error; + +/// The relative distribution of a voter's stake among the winning targets. +pub type AssignmentOf = + sp_npos_elections::Assignment<::AccountId, SolutionAccuracyOf>; + +/// A paginated raw solution type. +/// +/// This is the representation of a stored, unverified solution. +/// +/// After feasibility, it is converted into `Supports`. +#[derive( + TypeInfo, + Encode, + Decode, + DecodeWithMemTracking, + DebugNoBound, + CloneNoBound, + EqNoBound, + PartialEqNoBound, + MaxEncodedLen, + DefaultNoBound, +)] +#[codec(mel_bound(T: crate::Config))] +#[scale_info(skip_type_params(T))] +pub struct PagedRawSolution { + /// The individual pages. + pub solution_pages: BoundedVec, ::Pages>, + /// The final claimed score post feasibility and concatenation of all pages. + pub score: ElectionScore, + /// The designated round. + pub round: u32, +} + +impl PagedRawSolution { + /// Get the total number of voters, assuming that voters in each page are unique. + pub fn voter_count(&self) -> usize { + self.solution_pages + .iter() + .map(|page| page.voter_count()) + .fold(0usize, |acc, x| acc.saturating_add(x)) + } + + /// Get the total number of winners, assuming that there's only a single page of targets. + pub fn winner_count_single_page_target_snapshot(&self) -> usize { + self.solution_pages + .iter() + .map(|page| page.unique_targets()) + .into_iter() + .flatten() + .collect::>() + .len() + } + + /// Get the total number of edges. + pub fn edge_count(&self) -> usize { + self.solution_pages + .iter() + .map(|page| page.edge_count()) + .fold(0usize, |acc, x| acc.saturating_add(x)) + } +} + +/// A helper trait to deal with the page index of partial solutions. +/// +/// This should only be called on the `Vec` or similar types. 
If the solution is *full*, +/// then it returns a normal iterator that is just mapping the index (usize) to `PageIndex`. +/// +/// if the solution is partial, it shifts the indices sufficiently so that the most significant page +/// of the solution matches with the most significant page of the snapshot onchain. +/// +/// See the tests below for examples. +pub trait Pagify { + /// Pagify a reference. + fn pagify(&self, bound: PageIndex) -> Box + '_>; +} + +impl Pagify for Vec { + fn pagify(&self, desired_pages: PageIndex) -> Box + '_> { + Box::new( + self.into_iter() + .enumerate() + .map(|(p, s)| (p.saturated_into::(), s)) + .map(move |(p, s)| { + let desired_pages_usize = desired_pages as usize; + // TODO: this could be an error. + debug_assert!(self.len() <= desired_pages_usize); + let padding = desired_pages_usize.saturating_sub(self.len()); + let new_page = p.saturating_add(padding.saturated_into::()); + (new_page, s) + }), + ) + } +} + +/// Helper trait to pad a partial solution such that the leftover pages are filled with zero. +/// +/// See the tests below for examples. +pub trait PadSolutionPages: Sized { + /// Pad the solution to the given number of pages. + fn pad_solution_pages(self, desired_pages: PageIndex) -> Self; +} + +impl> PadSolutionPages + for BoundedVec +{ + fn pad_solution_pages(self, desired_pages: PageIndex) -> Self { + let desired_pages_usize = (desired_pages).min(Bound::get()) as usize; + debug_assert!(self.len() <= desired_pages_usize); + if self.len() == desired_pages_usize { + return self + } + + // we basically need to prepend the list with this many items. 
+ let empty_slots = desired_pages_usize.saturating_sub(self.len()); + let self_as_vec = sp_std::iter::repeat(Default::default()) + .take(empty_slots) + .chain(self.into_iter()) + .collect::>(); + self_as_vec.try_into().expect("sum of both iterators has at most `desired_pages_usize` items; `desired_pages_usize` is `min`-ed by `Bound`; conversion cannot fail; qed") + } +} + +/// Alias for a voter, parameterized by the miner config. +pub(crate) type VoterOf = frame_election_provider_support::Voter< + ::AccountId, + ::MaxVotesPerVoter, +>; + +/// Alias for a page of voters, parameterized by this crate's config. +pub(crate) type VoterPageOf = BoundedVec, ::VoterSnapshotPerBlock>; + +/// Alias for all pages of voters, parameterized by this crate's config. +pub(crate) type AllVoterPagesOf = BoundedVec, ::Pages>; + +/// Maximum number of items that [`AllVoterPagesOf`] can contain, when flattened. +pub(crate) struct MaxFlattenedVoters(sp_std::marker::PhantomData); +impl Get for MaxFlattenedVoters { + fn get() -> u32 { + T::VoterSnapshotPerBlock::get().saturating_mul(T::Pages::get()) + } +} + +/// Same as [`AllVoterPagesOf`], but instead of being a nested bounded vec, the entire voters are +/// flattened into one outer, unbounded `Vec` type. +/// +/// This is bounded by [`MaxFlattenedVoters`]. +pub(crate) type AllVoterPagesFlattenedOf = BoundedVec, MaxFlattenedVoters>; + +/// Current phase of the pallet. +#[derive( + PartialEqNoBound, + EqNoBound, + CloneNoBound, + Encode, + Decode, + DecodeWithMemTracking, + MaxEncodedLen, + DebugNoBound, + TypeInfo, +)] +#[codec(mel_bound(T: crate::Config))] +#[scale_info(skip_type_params(T))] +pub enum Phase { + /// Nothing is happening, but it might. + Off, + /// Signed phase is open. + /// + /// The inner value is the number of blocks left in this phase. + Signed(BlockNumberFor), + /// We are validating results. 
+ /// + /// This always follows the signed phase, and is a window of time in which we try to validate + /// our signed results. + /// + /// The inner value is the number of blocks left in this phase. + SignedValidation(BlockNumberFor), + /// Unsigned phase. + /// + /// The inner value is the number of blocks left in this phase. + /// + /// We do not yet check whether the unsigned phase is active or passive. The intent is for the + /// blockchain to be able to declare: "I believe that there exists an adequate signed + /// solution," advising validators not to bother running the unsigned offchain worker. + /// + /// As validator nodes are free to edit their OCW code, they could simply ignore this advisory + /// and always compute their own solution. However, by default, when the unsigned phase is + /// passive, the offchain workers will not bother running. + Unsigned(BlockNumberFor), + /// Snapshot is being created. No other operation is allowed. This can be one or more blocks. + /// The inner value should be read as "`remaining` number of pages are left to be fetched". + /// Thus, the inner value is `0` if the snapshot is complete and we are ready to move on. + /// + /// This value should be interpreted after `on_initialize` of this pallet has already been + /// called. + Snapshot(PageIndex), + /// Snapshot is done, and we are waiting for `Export` to kick in. + Done, + /// Exporting has begun, and the given page was the last one received. + /// + /// Once this is active, no more signed or unsigned solutions will be accepted. + Export(PageIndex), + /// The emergency phase. This could be enabled by one of the fallbacks, and locks the pallet + /// such that only governance can change the state. + Emergency, +} + +impl Copy for Phase {} + +impl Default for Phase { + fn default() -> Self { + Phase::Off + } +} + +impl Phase { + /// Get the phase that we should set in storage once we receive the start signal. 
+ pub(crate) fn start_phase() -> Self { + // note that we add one block because we want the target snapshot to happen one block + // before. + Self::Snapshot(T::Pages::get()) + } + + /// Consume self and return the next variant, as per what the current phase is. + pub fn next(self) -> Self { + match self { + // for these phases, we do nothing. + Self::Off => Self::Off, + Self::Emergency => Self::Emergency, + + // snapshot phase + Self::Snapshot(0) => + if let Some(signed_duration) = T::SignedPhase::get().checked_sub(&One::one()) { + Self::Signed(signed_duration) + } else if let Some(unsigned_duration) = + T::UnsignedPhase::get().checked_sub(&One::one()) + { + Self::Unsigned(unsigned_duration) + } else { + T::AreWeDone::get() + }, + Self::Snapshot(non_zero_remaining) => + Self::Snapshot(non_zero_remaining.defensive_saturating_sub(One::one())), + + // signed phase + Self::Signed(zero) if zero == BlockNumberFor::::zero() => + Self::SignedValidation(T::SignedValidationPhase::get().saturating_sub(One::one())), + Self::Signed(non_zero_left) => + Self::Signed(non_zero_left.defensive_saturating_sub(One::one())), + + // signed validation + Self::SignedValidation(zero) if zero == BlockNumberFor::::zero() => + if let Some(unsigned_duration) = T::UnsignedPhase::get().checked_sub(&One::one()) { + Self::Unsigned(unsigned_duration) + } else { + T::AreWeDone::get() + }, + Self::SignedValidation(non_zero_left) => + Self::SignedValidation(non_zero_left.defensive_saturating_sub(One::one())), + + // unsigned phase -- at this phase we will + Self::Unsigned(zero) if zero == BlockNumberFor::::zero() => T::AreWeDone::get(), + Self::Unsigned(non_zero_left) => + Self::Unsigned(non_zero_left.defensive_saturating_sub(One::one())), + + // Done + Self::Done => Self::Done, + + // Export + Self::Export(0) => Self::Off, + Self::Export(non_zero_left) => + Self::Export(non_zero_left.defensive_saturating_sub(One::one())), + } + } + + /// Whether the phase is emergency or not. 
+ pub fn is_emergency(&self) -> bool { + matches!(self, Phase::Emergency) + } + + /// Whether the phase is signed or not. + pub fn is_signed(&self) -> bool { + matches!(self, Phase::Signed(_)) + } + + /// Whether the phase is unsigned or not. + pub fn is_unsigned(&self) -> bool { + matches!(self, Phase::Unsigned(_)) + } + + /// Whether the phase is off or not. + pub fn is_off(&self) -> bool { + matches!(self, Phase::Off) + } + + /// Whether the phase is snapshot or not. + pub fn is_snapshot(&self) -> bool { + matches!(self, Phase::Snapshot(_)) + } + + /// Whether the phase is done or not. + pub fn is_done(&self) -> bool { + matches!(self, Phase::Done) + } + + /// Whether the phase is export or not. + pub fn is_export(&self) -> bool { + matches!(self, Phase::Export(_)) + } + + /// Whether the phase is signed validation or not. + pub fn is_signed_validation(&self) -> bool { + matches!(self, Phase::SignedValidation(_)) + } + + /// Whether the signed validation phase is opened now. + pub fn is_signed_validation_opened_now(&self) -> bool { + self == &Phase::SignedValidation(T::SignedValidationPhase::get().saturating_sub(One::one())) + } + + /// Whether the unsigned phase is opened now. + pub fn is_unsigned_opened_now(&self) -> bool { + self == &Phase::Unsigned(T::UnsignedPhase::get().saturating_sub(One::one())) + } +} + +#[cfg(test)] +mod pagify { + use super::{PadSolutionPages, Pagify}; + use frame_support::{traits::ConstU32, BoundedVec}; + use sp_core::bounded_vec; + + #[test] + fn pagify_works() { + // is a noop when you have the same length + assert_eq!( + vec![10, 11, 12].pagify(3).collect::>(), + vec![(0, &10), (1, &11), (2, &12)] + ); + + // shifts the indices otherwise + assert_eq!(vec![10, 11].pagify(3).collect::>(), vec![(1, &10), (2, &11)]); + assert_eq!(vec![10].pagify(3).collect::>(), vec![(2, &10)]); + } + + #[test] + fn pad_solution_pages_works() { + // noop if the solution is complete, as with pagify. 
+ let solution: BoundedVec<_, ConstU32<3>> = bounded_vec![1u32, 2, 3]; + assert_eq!(solution.pad_solution_pages(3).into_inner(), vec![1, 2, 3]); + + // pads the solution with default if partial.. + let solution: BoundedVec<_, ConstU32<3>> = bounded_vec![2, 3]; + assert_eq!(solution.pad_solution_pages(3).into_inner(), vec![0, 2, 3]); + + // behaves the same as `pad_solution_pages(3)`. + let solution: BoundedVec<_, ConstU32<3>> = bounded_vec![2, 3]; + assert_eq!(solution.pad_solution_pages(4).into_inner(), vec![0, 2, 3]); + } +} diff --git a/substrate/frame/election-provider-multi-block/src/unsigned/benchmarking.rs b/substrate/frame/election-provider-multi-block/src/unsigned/benchmarking.rs new file mode 100644 index 0000000000000..574ae8db790a1 --- /dev/null +++ b/substrate/frame/election-provider-multi-block/src/unsigned/benchmarking.rs @@ -0,0 +1,85 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::{ + unsigned::{miner::OffchainWorkerMiner, Call, Config, Pallet}, + verifier::Verifier, + CurrentPhase, Phase, +}; +use frame_benchmarking::v2::*; +use frame_election_provider_support::ElectionProvider; +use frame_support::{assert_ok, pallet_prelude::*}; +use frame_system::RawOrigin; +use sp_std::boxed::Box; + +#[benchmarks(where T: crate::Config + crate::signed::Config + crate::verifier::Config)] +mod benchmarks { + use super::*; + + #[benchmark(pov_mode = Measured)] + fn validate_unsigned() -> Result<(), BenchmarkError> { + #[cfg(test)] + crate::mock::ElectionStart::set(sp_runtime::traits::Bounded::max_value()); + crate::Pallet::::start().unwrap(); + + crate::Pallet::::roll_until_matches(|| { + matches!(CurrentPhase::::get(), Phase::Unsigned(_)) + }); + let call: Call = OffchainWorkerMiner::::mine_solution(T::MinerPages::get(), false) + .map(|solution| Call::submit_unsigned { paged_solution: Box::new(solution) }) + .unwrap(); + + #[block] + { + assert_ok!(Pallet::::validate_unsigned(TransactionSource::Local, &call)); + } + + Ok(()) + } + + #[benchmark(pov_mode = Measured)] + fn submit_unsigned() -> Result<(), BenchmarkError> { + #[cfg(test)] + crate::mock::ElectionStart::set(sp_runtime::traits::Bounded::max_value()); + crate::Pallet::::start().unwrap(); + + // roll to unsigned phase open + crate::Pallet::::roll_until_matches(|| { + matches!(CurrentPhase::::get(), Phase::Unsigned(_)) + }); + // TODO: we need to better ensure that this is actually worst case + let solution = + OffchainWorkerMiner::::mine_solution(T::MinerPages::get(), false).unwrap(); + + // nothing is queued + assert!(T::Verifier::queued_score().is_none()); + #[block] + { + assert_ok!(Pallet::::submit_unsigned(RawOrigin::None.into(), Box::new(solution))); + } + + // something is queued + assert!(T::Verifier::queued_score().is_some()); + Ok(()) + } + + impl_benchmark_test_suite!( + Pallet, + crate::mock::ExtBuilder::full().build_unchecked(), + crate::mock::Runtime + ); +} diff --git 
a/substrate/frame/election-provider-multi-block/src/unsigned/miner.rs b/substrate/frame/election-provider-multi-block/src/unsigned/miner.rs new file mode 100644 index 0000000000000..bafc78cd9d6ed --- /dev/null +++ b/substrate/frame/election-provider-multi-block/src/unsigned/miner.rs @@ -0,0 +1,2248 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The miner code for the EPMB pallet. +//! +//! It broadly consists of two main types: +//! +//! * [`crate::unsigned::miner::BaseMiner`], which is more generic, needs parameterization via +//! [`crate::unsigned::miner::MinerConfig`], and can be used by an external implementation. +//! * [`crate::unsigned::miner::OffchainWorkerMiner`], which is more opinionated, and is used by +//! this pallet via the `offchain_worker` hook to also mine solutions during the +//! `Phase::Unsigned`. 
+ +use super::{Call, Config, Pallet}; +use crate::{ + helpers, + types::{PadSolutionPages, *}, + verifier::{self}, + CommonError, +}; +use codec::Encode; +use frame_election_provider_support::{ExtendedBalance, NposSolver, Support, VoteWeight}; +use frame_support::{traits::Get, BoundedVec}; +use frame_system::pallet_prelude::*; +use scale_info::TypeInfo; +use sp_npos_elections::EvaluateSupport; +use sp_runtime::{ + offchain::storage::{MutateStorageError, StorageValueRef}, + traits::{SaturatedConversion, Saturating, Zero}, +}; +use sp_std::{collections::btree_map::BTreeMap, prelude::*}; + +// TODO: we should have a fuzzer for miner that ensures no matter the parameters, it generates a +// valid solution. Esp. for the trimming. + +/// The type of the snapshot. +/// +/// Used to express errors. +#[derive(Debug, Eq, PartialEq)] +pub enum SnapshotType { + /// Voters at the given page missing. + Voters(PageIndex), + /// Targets missing. + Targets, + /// Metadata missing. + Metadata, + /// Desired targets missing. + DesiredTargets, +} + +pub(crate) type MinerSolverErrorOf = <::Solver as NposSolver>::Error; + +/// The errors related to the [`BaseMiner`]. +#[derive( + frame_support::DebugNoBound, frame_support::EqNoBound, frame_support::PartialEqNoBound, +)] +pub enum MinerError { + /// An internal error in the NPoS elections crate. + NposElections(sp_npos_elections::Error), + /// An internal error in the generic solver. + Solver(MinerSolverErrorOf), + /// Snapshot data was unavailable unexpectedly. + SnapshotUnAvailable(SnapshotType), + /// The base, common errors from the pallet. + Common(CommonError), + /// The solution generated from the miner is not feasible. + Feasibility(verifier::FeasibilityError), + /// Some page index has been invalid. + InvalidPage, + /// Too many winners were removed during trimming. + TooManyWinnersRemoved, + /// A defensive error has occurred. 
+ Defensive(&'static str), +} + +impl From for MinerError { + fn from(e: sp_npos_elections::Error) -> Self { + MinerError::NposElections(e) + } +} + +impl From for MinerError { + fn from(e: verifier::FeasibilityError) -> Self { + MinerError::Feasibility(e) + } +} + +impl From for MinerError { + fn from(e: CommonError) -> Self { + MinerError::Common(e) + } +} + +/// The errors related to the `OffchainWorkerMiner`. +#[derive( + frame_support::DebugNoBound, frame_support::EqNoBound, frame_support::PartialEqNoBound, +)] +pub enum OffchainMinerError { + /// An error in the base miner. + BaseMiner(MinerError), + /// The base, common errors from the pallet. + Common(CommonError), + /// Something went wrong fetching the lock. + Lock(&'static str), + /// Submitting a transaction to the pool failed. + PoolSubmissionFailed, + /// Cannot restore a solution that was not stored. + NoStoredSolution, + /// Cached solution is not a `submit_unsigned` call. + SolutionCallInvalid, + /// Failed to store a solution. + FailedToStoreSolution, +} + +impl From> for OffchainMinerError { + fn from(e: MinerError) -> Self { + OffchainMinerError::BaseMiner(e) + } +} + +impl From for OffchainMinerError { + fn from(e: CommonError) -> Self { + OffchainMinerError::Common(e) + } +} + +/// Configurations for the miner. +/// +/// This is extracted from the main crate's config so that an offchain miner can readily use the +/// [`BaseMiner`] without needing to deal with the rest of the pallet's configuration. +pub trait MinerConfig { + /// The account id type. + type AccountId: Ord + Clone + codec::Codec + core::fmt::Debug; + /// The solution that the miner is mining. + /// The solution type. + type Solution: codec::FullCodec + + Default + + PartialEq + + Eq + + Clone + + sp_std::fmt::Debug + + Ord + + NposSolution + + TypeInfo + + codec::MaxEncodedLen; + /// The solver type. + type Solver: NposSolver; + /// The maximum length that the miner should use for a solution, per page. 
+ /// + /// This value is not set in stone, and it is up to an individual miner to configure. A good + /// value is something like 75% of the total block length, which can be fetched from the system + /// pallet. + type MaxLength: Get; + /// Maximum number of votes per voter. + /// + /// Must be the same as configured in the [`crate::Config::DataProvider`]. + /// + /// For simplicity, this is 16 in Polkadot and 24 in Kusama. + type MaxVotesPerVoter: Get; + /// Maximum number of winners to select per page. + /// + /// The miner should respect this, it is used for trimming, and bounded data types. + /// + /// Should equal to the onchain value set in `Verifier::Config`. + type MaxWinnersPerPage: Get; + /// Maximum number of backers per winner, per page. + /// + /// The miner should respect this, it is used for trimming, and bounded data types. + /// + /// Should equal to the onchain value set in `Verifier::Config`. + type MaxBackersPerWinner: Get; + /// Maximum number of backers, per winner, across all pages. + /// + /// The miner should respect this, it is used for trimming, and bounded data types. + /// + /// Should equal to the onchain value set in `Verifier::Config`. + type MaxBackersPerWinnerFinal: Get; + /// Maximum number of backers, per winner, per page. + + /// Maximum number of pages that we may compute. + /// + /// Must be the same as configured in the [`crate::Config`]. + type Pages: Get; + /// Maximum number of voters per snapshot page. + /// + /// Must be the same as configured in the [`crate::Config`]. + type VoterSnapshotPerBlock: Get; + /// Maximum number of targets per snapshot page. + /// + /// Must be the same as configured in the [`crate::Config`]. + type TargetSnapshotPerBlock: Get; + /// The hash type of the runtime. + type Hash: Eq + PartialEq; +} + +/// A base miner that is only capable of mining a new solution and checking it against the state of +/// this pallet for feasibility, and trimming its length/weight. 
+pub struct BaseMiner(sp_std::marker::PhantomData); + +/// Parameterized `BoundedSupports` for the miner. +/// +/// The bounds of this are set such to only encapsulate a single page of a snapshot. The other +/// counterpart is [`FullSupportsOfMiner`]. +pub type PageSupportsOfMiner = frame_election_provider_support::BoundedSupports< + ::AccountId, + ::MaxWinnersPerPage, + ::MaxBackersPerWinner, +>; + +/// The full version of [`PageSupportsOfMiner`]. +/// +/// This should be used on a support instance that is encapsulating the full solution. +/// +/// Another way to look at it, this is never wrapped in a `Vec<_>` +pub type FullSupportsOfMiner = frame_election_provider_support::BoundedSupports< + ::AccountId, + ::MaxWinnersPerPage, + ::MaxBackersPerWinnerFinal, +>; + +/// Aggregator for inputs to [`BaseMiner`]. +pub struct MineInput { + /// Number of winners to pick. + pub desired_targets: u32, + /// All of the targets. + pub all_targets: BoundedVec, + /// Paginated list of voters. + /// + /// Note for staking-miners: How this is calculated is rather delicate, and the order of the + /// nested vectors matter. See carefully how `OffchainWorkerMiner::mine_solution` is doing + /// this. + pub voter_pages: AllVoterPagesOf, + /// Number of pages to mind. + /// + /// Note for staking-miner: Always use [`MinerConfig::Pages`] unless explicitly wanted + /// otherwise. + pub pages: PageIndex, + /// Whether to reduce the solution. Almost always`` + pub do_reduce: bool, + /// The current round for which the solution is being calculated. + pub round: u32, +} + +impl BaseMiner { + /// Mine a new npos solution, with the given number of pages. + /// + /// This miner is only capable of mining a solution that either uses all of the pages of the + /// snapshot, or the top `pages` thereof. 
+ /// + /// This always trims the solution to match a few parameters: + /// + /// [`MinerConfig::MaxWinnersPerPage`], [`MinerConfig::MaxBackersPerWinner`], + /// [`MinerConfig::MaxBackersPerWinnerFinal`] and [`MinerConfig::MaxLength`]. + /// + /// The order of pages returned is aligned with the snapshot. For example, the index 0 of the + /// returning solution pages corresponds to the page 0 of the snapshot. + /// + /// The only difference is, if the solution is partial, then [`Pagify`] must be used to properly + /// pad the results. + pub fn mine_solution( + MineInput { desired_targets, all_targets, voter_pages, mut pages, do_reduce, round }: MineInput< + T, + >, + ) -> Result, MinerError> { + pages = pages.min(T::Pages::get()); + + // we also build this closure early, so we can let `targets` be consumed. + let voter_page_fn = helpers::generate_voter_page_fn::(&voter_pages); + let target_index_fn = helpers::target_index_fn::(&all_targets); + + // now flatten the voters, ready to be used as if pagination did not existed. + let all_voters: AllVoterPagesFlattenedOf = voter_pages + .iter() + .cloned() + .flatten() + .collect::>() + .try_into() + .expect("Flattening the voters into `AllVoterPagesFlattenedOf` cannot fail; qed"); + + let ElectionResult { winners: _, assignments } = T::Solver::solve( + desired_targets as usize, + all_targets.clone().to_vec(), + all_voters.clone().into_inner(), + ) + .map_err(|e| MinerError::Solver(e))?; + + // reduce and trim supports. We don't trim length and weight here, since those are dependent + // on the final form of the solution ([`PagedRawSolution`]), thus we do it later. + let trimmed_assignments = { + // Implementation note: the overall code path is as follows: election_results -> + // assignments -> staked assignments -> reduce -> supports -> trim supports -> staked + // assignments -> final assignments + // This is by no means the most performant, but is the clear and correct. 
+ use sp_npos_elections::{ + assignment_ratio_to_staked_normalized, assignment_staked_to_ratio_normalized, + reduce, supports_to_staked_assignment, to_supports, EvaluateSupport, + }; + + // These closures are of no use in the rest of these code, since they only deal with the + // overall list of voters. + let cache = helpers::generate_voter_cache::(&all_voters); + let stake_of = helpers::stake_of_fn::(&all_voters, &cache); + + // 1. convert to staked and reduce + let (reduced_count, staked) = { + let mut staked = assignment_ratio_to_staked_normalized(assignments, &stake_of) + .map_err::, _>(Into::into)?; + + // first, reduce the solution if requested. This will already remove a lot of + // "redundant" and reduce the chance for the need of any further trimming. + let count = if do_reduce { reduce(&mut staked) } else { 0 }; + (count, staked) + }; + + // 2. trim the supports by FINAL backing. + let (_pre_score, final_trimmed_assignments, winners_removed, backers_removed) = { + // these supports could very well be invalid for SCORE purposes. The reason is that + // you might trim out half of an account's stake, but we don't look for this + // account's other votes to fix it. 
+ let supports_invalid_score = to_supports(&staked); + + let pre_score = (&supports_invalid_score).evaluate(); + let (bounded_invalid_score, winners_removed, backers_removed) = + FullSupportsOfMiner::::sorted_truncate_from(supports_invalid_score); + + // now recreated the staked assignments + let staked = supports_to_staked_assignment(bounded_invalid_score.into()); + let assignments = assignment_staked_to_ratio_normalized(staked) + .map_err::, _>(Into::into)?; + (pre_score, assignments, winners_removed, backers_removed) + }; + + miner_log!( + debug, + "initial score = {:?}, reduced {} edges, trimmed {} winners and {} backers due to global support limits", + _pre_score, + reduced_count, + winners_removed, + backers_removed, + ); + + final_trimmed_assignments + }; + + // split the assignments into different pages. + let mut paged_assignments: BoundedVec>, T::Pages> = + BoundedVec::with_bounded_capacity(pages as usize); + paged_assignments.bounded_resize(pages as usize, Default::default()); + + for assignment in trimmed_assignments { + // NOTE: this `page` index is LOCAL. It does not correspond to the actual page index of + // the snapshot map, but rather the index in the `voter_pages`. + let page = voter_page_fn(&assignment.who).ok_or(MinerError::InvalidPage)?; + let assignment_page = + paged_assignments.get_mut(page as usize).ok_or(MinerError::InvalidPage)?; + assignment_page.push(assignment); + } + + // convert each page to a compact struct -- no more change allowed. + let solution_pages: BoundedVec, T::Pages> = paged_assignments + .into_iter() + .enumerate() + .map(|(page_index, assignment_page)| { + // get the page of the snapshot that corresponds to this page of the assignments. + let page: PageIndex = page_index.saturated_into(); + let voter_snapshot_page = voter_pages + .get(page as usize) + .ok_or(MinerError::SnapshotUnAvailable(SnapshotType::Voters(page)))?; + + // one last trimming -- `MaxBackersPerWinner`, the per-page variant. 
+ let trimmed_assignment_page = + Self::trim_supports_max_backers_per_winner_per_page( + assignment_page, + voter_snapshot_page, + page_index as u32, + )?; + + let voter_index_fn = { + let cache = helpers::generate_voter_cache::(&voter_snapshot_page); + helpers::voter_index_fn_owned::(cache) + }; + + >::from_assignment( + &trimmed_assignment_page, + &voter_index_fn, + &target_index_fn, + ) + .map_err::, _>(Into::into) + }) + .collect::, _>>()? + .try_into() + .expect("`paged_assignments` is bound by `T::Pages`; length cannot change in iter chain; qed"); + + // now do the length trim. + let mut solution_pages_unbounded = solution_pages.into_inner(); + let _trim_length_weight = + Self::maybe_trim_weight_and_len(&mut solution_pages_unbounded, &voter_pages)?; + let solution_pages = solution_pages_unbounded + .try_into() + .expect("maybe_trim_weight_and_len cannot increase the length of its input; qed."); + miner_log!(debug, "trimmed {} voters due to length restriction.", _trim_length_weight); + + // finally, wrap everything up. Assign a fake score here, since we might need to re-compute + // it. + let mut paged = PagedRawSolution { round, solution_pages, score: Default::default() }; + + // OPTIMIZATION: we do feasibility_check inside `compute_score`, and once later + // pre_dispatch. I think it is fine, but maybe we can improve it. + let score = Self::compute_score(&paged, &voter_pages, &all_targets, desired_targets) + .map_err::, _>(Into::into)?; + paged.score = score; + + miner_log!( + info, + "mined a solution with {} pages, score {:?}, {} winners, {} voters, {} edges, and {} bytes", + pages, + score, + paged.winner_count_single_page_target_snapshot(), + paged.voter_count(), + paged.edge_count(), + paged.using_encoded(|b| b.len()) + ); + + Ok(paged) + } + + /// perform the feasibility check on all pages of a solution, returning `Ok(())` if all good and + /// the corresponding error otherwise. 
+ pub fn check_feasibility( + paged_solution: &PagedRawSolution, + paged_voters: &AllVoterPagesOf, + snapshot_targets: &BoundedVec, + desired_targets: u32, + ) -> Result>, MinerError> { + // check every solution page for feasibility. + let padded_voters = paged_voters.clone().pad_solution_pages(T::Pages::get()); + paged_solution + .solution_pages + .pagify(T::Pages::get()) + .map(|(page_index, page_solution)| { + match verifier::feasibility_check_page_inner_with_snapshot::( + page_solution.clone(), + &padded_voters[page_index as usize], + snapshot_targets, + desired_targets, + ) { + Ok(x) => { + miner_log!(debug, "feasibility check of page {:?} was okay", page_index,); + Ok(x) + }, + Err(e) => { + miner_log!( + warn, + "feasibility check of page {:?} {:?} failed for solution because: {:?}", + page_index, + page_solution, + e, + ); + Err(e) + }, + } + }) + .collect::, _>>() + .map_err(|err| MinerError::from(err)) + .and_then(|supports| { + // If we someday want to check `MaxBackersPerWinnerFinal`, it would be here. + Ok(supports) + }) + } + + /// Take the given raw paged solution and compute its score. This will replicate what the chain + /// would do as closely as possible, and expects all the corresponding snapshot data to be + /// available. 
+ fn compute_score( + paged_solution: &PagedRawSolution, + paged_voters: &AllVoterPagesOf, + all_targets: &BoundedVec, + desired_targets: u32, + ) -> Result> { + let all_supports = + Self::check_feasibility(paged_solution, paged_voters, all_targets, desired_targets)?; + let mut total_backings: BTreeMap = BTreeMap::new(); + all_supports.into_iter().flat_map(|x| x.0).for_each(|(who, support)| { + let backing = total_backings.entry(who).or_default(); + *backing = backing.saturating_add(support.total); + }); + + let all_supports = total_backings + .into_iter() + .map(|(who, total)| (who, Support { total, ..Default::default() })) + .collect::>(); + + Ok((&all_supports).evaluate()) + } + + fn trim_supports_max_backers_per_winner_per_page( + untrimmed_assignments: Vec>, + page_voters: &VoterPageOf, + page: PageIndex, + ) -> Result>, MinerError> { + use sp_npos_elections::{ + assignment_ratio_to_staked_normalized, assignment_staked_to_ratio_normalized, + supports_to_staked_assignment, to_supports, + }; + // convert to staked + let cache = helpers::generate_voter_cache::(page_voters); + let stake_of = helpers::stake_of_fn::(&page_voters, &cache); + let untrimmed_staked_assignments = + assignment_ratio_to_staked_normalized(untrimmed_assignments, &stake_of)?; + + // convert to supports + let supports = to_supports(&untrimmed_staked_assignments); + drop(untrimmed_staked_assignments); + + // Convert it to our desired bounds, which will truncate the smallest backers if need + // be. 
+ let (bounded, winners_removed, backers_removed) = + PageSupportsOfMiner::::sorted_truncate_from(supports); + + miner_log!( + debug, + "trimmed {} winners and {} backers from page {} due to per-page limits", + winners_removed, + backers_removed, + page + ); + + // convert back to staked + let trimmed_staked_assignments = supports_to_staked_assignment(bounded.into()); + // and then ratio assignments + let trimmed_assignments = + assignment_staked_to_ratio_normalized(trimmed_staked_assignments)?; + + Ok(trimmed_assignments) + } + + /// Maybe tim the weight and length of the given multi-page solution. + /// + /// Returns the number of voters removed. + /// + /// If either of the bounds are not met, the trimming strategy is as follows: + /// + /// Start from the least significant page. Assume only this page is going to be trimmed. call + /// `page.sort()` on this page. This will make sure in each field (`votes1`, `votes2`, etc.) of + /// that page, the voters are sorted by descending stake. Then, we compare the last item of each + /// field. This is the process of removing the single least staked voter. + /// + /// We repeat this until satisfied, for both weight and length. If a full page is removed, but + /// the bound is not satisfied, we need to make sure that we sort the next least valuable page, + /// and repeat the same process. + /// + /// NOTE: this is a public function to be used by the `OffchainWorkerMiner` or any similar one, + /// based on the submission strategy. The length and weight bounds of a call are dependent on + /// the number of pages being submitted, the number of blocks over which we submit, and the type + /// of the transaction and its weight (e.g. signed or unsigned). + /// + /// NOTE: It could be that this function removes too many voters, and the solution becomes + /// invalid. This is not yet handled and only a warning is emitted. 
+ pub fn maybe_trim_weight_and_len( + solution_pages: &mut Vec>, + paged_voters: &AllVoterPagesOf, + ) -> Result> { + debug_assert_eq!(solution_pages.len(), paged_voters.len()); + let size_limit = T::MaxLength::get(); + + let needs_any_trim = |solution_pages: &mut Vec>| { + let size = solution_pages.encoded_size() as u32; + let needs_len_trim = size > size_limit; + // a reminder that we used to have weight trimming here, but not more! + let needs_weight_trim = false; + needs_weight_trim || needs_len_trim + }; + + // Note the solution might be partial. In either case, this is its least significant page. + let mut current_trimming_page = 0; + let current_trimming_page_stake_of = |current_trimming_page: usize| { + Box::new(move |voter_index: &SolutionVoterIndexOf| -> VoteWeight { + paged_voters + .get(current_trimming_page) + .and_then(|page_voters| { + page_voters + .get((*voter_index).saturated_into::()) + .map(|(_, s, _)| *s) + }) + .unwrap_or_default() + }) + }; + + let sort_current_trimming_page = + |current_trimming_page: usize, solution_pages: &mut Vec>| { + solution_pages.get_mut(current_trimming_page).map(|solution_page| { + let stake_of_fn = current_trimming_page_stake_of(current_trimming_page); + solution_page.sort(stake_of_fn) + }); + }; + + let is_empty = |solution_pages: &Vec>| { + solution_pages.iter().all(|page| page.voter_count().is_zero()) + }; + + if needs_any_trim(solution_pages) { + sort_current_trimming_page(current_trimming_page, solution_pages) + } + + // Implementation note: we want `solution_pages` and `paged_voters` to remain in sync, so + // while one of the pages of `solution_pages` might become "empty" we prefer not removing + // it. This has a slight downside that even an empty pages consumes a few dozens of bytes, + // which we accept for code simplicity. 
+ + let mut removed = 0; + while needs_any_trim(solution_pages) && !is_empty(solution_pages) { + if let Some(removed_idx) = + solution_pages.get_mut(current_trimming_page).and_then(|page| { + let stake_of_fn = current_trimming_page_stake_of(current_trimming_page); + page.remove_weakest_sorted(&stake_of_fn) + }) { + miner_log!( + trace, + "removed voter at index {:?} of (un-pagified) page {} as the weakest due to weight/length limits.", + removed_idx, + current_trimming_page + ); + // we removed one person, continue. + removed.saturating_inc(); + } else { + // this page cannot support remove anymore. Try and go to the next page. + miner_log!( + debug, + "page {} seems to be fully empty now, moving to the next one", + current_trimming_page + ); + let next_page = current_trimming_page.saturating_add(1); + if paged_voters.len() > next_page { + current_trimming_page = next_page; + sort_current_trimming_page(current_trimming_page, solution_pages); + } else { + miner_log!( + warn, + "no more pages to trim from at page {}, already trimmed", + current_trimming_page + ); + break + } + } + } + + Ok(removed) + } +} + +/// A miner that is suited to work inside offchain worker environment. +/// +/// This is parameterized by [`Config`], rather than [`MinerConfig`]. +pub struct OffchainWorkerMiner(sp_std::marker::PhantomData); + +impl OffchainWorkerMiner { + /// Storage key used to store the offchain worker running status. + pub(crate) const OFFCHAIN_LOCK: &'static [u8] = b"parity/multi-block-unsigned-election/lock"; + /// Storage key used to store the last block number at which offchain worker ran. + const OFFCHAIN_LAST_BLOCK: &'static [u8] = b"parity/multi-block-unsigned-election"; + /// Storage key used to cache the solution `call` and its snapshot fingerprint. 
+ const OFFCHAIN_CACHED_CALL: &'static [u8] = b"parity/multi-block-unsigned-election/call"; + + pub(crate) fn fetch_snapshot( + pages: PageIndex, + ) -> Result< + (AllVoterPagesOf, BoundedVec, u32), + OffchainMinerError, + > { + // read the appropriate snapshot pages. + let desired_targets = crate::Snapshot::::desired_targets() + .ok_or(MinerError::SnapshotUnAvailable(SnapshotType::DesiredTargets))?; + let all_targets = crate::Snapshot::::targets() + .ok_or(MinerError::SnapshotUnAvailable(SnapshotType::Targets))?; + + // This is the range of voters that we are interested in. + let voter_pages_range = crate::Pallet::::msp_range_for(pages as usize); + + sublog!( + debug, + "unsigned::base-miner", + "mining a solution with {} pages, voter snapshot range will be: {:?}", + pages, + voter_pages_range + ); + + // NOTE: if `pages (2) < T::Pages (3)`, at this point this vector will have length 2, + // with a layout of `[snapshot(1), snapshot(2)]`, namely the two most significant pages + // of the snapshot. + let voter_pages: BoundedVec<_, T::Pages> = voter_pages_range + .into_iter() + .map(|p| { + crate::Snapshot::::voters(p) + .ok_or(MinerError::SnapshotUnAvailable(SnapshotType::Voters(p))) + }) + .collect::, _>>()? + .try_into() + .expect( + "`voter_pages_range` has `.take(pages)`; it must have length less than pages; it + must convert to `BoundedVec`; qed", + ); + + Ok((voter_pages, all_targets, desired_targets)) + } + + pub(crate) fn mine_solution( + pages: PageIndex, + do_reduce: bool, + ) -> Result, OffchainMinerError> { + let (voter_pages, all_targets, desired_targets) = Self::fetch_snapshot(pages)?; + let round = crate::Pallet::::round(); + BaseMiner::::mine_solution(MineInput { + desired_targets, + all_targets, + voter_pages, + pages, + do_reduce, + round, + }) + .map_err(Into::into) + } + + /// Get a checked solution from the base miner, ensure unsigned-specific checks also pass, then + /// return an submittable call. 
+ fn mine_checked_call() -> Result, OffchainMinerError> { + // we always do reduce in the offchain worker miner. + let reduce = true; + + // NOTE: we don't run any checks in the base miner, and run all of them via + // `Self::full_checks`. + let paged_solution = Self::mine_solution(T::MinerPages::get(), reduce) + .map_err::, _>(Into::into)?; + // check the call fully, no fingerprinting. + let _ = Self::check_solution(&paged_solution, None, true)?; + + let call: Call = + Call::::submit_unsigned { paged_solution: Box::new(paged_solution) }.into(); + + Ok(call) + } + + /// Mine a new checked solution, cache it, and submit it back to the chain as an unsigned + /// transaction. + pub fn mine_check_save_submit() -> Result<(), OffchainMinerError> { + sublog!(debug, "unsigned::ocw-miner", "miner attempting to compute an unsigned solution."); + let call = Self::mine_checked_call()?; + Self::save_solution(&call, crate::Snapshot::::fingerprint())?; + Self::submit_call(call) + } + + /// Check the solution, from the perspective of the offchain-worker miner: + /// + /// 1. unsigned-specific checks. + /// 2. full-checks of the base miner + /// 1. optionally feasibility check. + /// 2. snapshot-independent checks. + /// 1. optionally, snapshot fingerprint. + pub fn check_solution( + paged_solution: &PagedRawSolution, + maybe_snapshot_fingerprint: Option, + do_feasibility: bool, + ) -> Result<(), OffchainMinerError> { + // NOTE: we prefer cheap checks first, so first run unsigned checks. 
+ Pallet::::unsigned_specific_checks(paged_solution)?; + Self::base_check_solution(paged_solution, maybe_snapshot_fingerprint, do_feasibility) + } + + fn submit_call(call: Call) -> Result<(), OffchainMinerError> { + sublog!( + debug, + "unsigned::ocw-miner", + "miner submitting a solution as an unsigned transaction" + ); + let xt = T::create_inherent(call.into()); + frame_system::offchain::SubmitTransaction::>::submit_transaction(xt) + .map(|_| { + sublog!( + debug, + "unsigned::ocw-miner", + "miner submitted a solution as an unsigned transaction", + ); + }) + .map_err(|_| OffchainMinerError::PoolSubmissionFailed) + } + + /// Check the solution, from the perspective of the base miner: + /// + /// 1. snapshot-independent checks. + /// - with the fingerprint check being an optional step fo that. + /// 2. optionally, feasibility check. + /// + /// In most cases, you should always use this either with `do_feasibility = true` or + /// `maybe_snapshot_fingerprint.is_some()`. Doing both could be an overkill. The snapshot + /// staying constant (which can be checked via the hash) is a string guarantee that the + /// feasibility still holds. + /// + /// The difference between this and [`Self::check_solution`] is that this does not run unsigned + /// specific checks. + pub(crate) fn base_check_solution( + paged_solution: &PagedRawSolution, + maybe_snapshot_fingerprint: Option, + do_feasibility: bool, + ) -> Result<(), OffchainMinerError> { + let _ = crate::Pallet::::snapshot_independent_checks( + paged_solution, + maybe_snapshot_fingerprint, + )?; + + if do_feasibility { + let (voter_pages, all_targets, desired_targets) = + Self::fetch_snapshot(paged_solution.solution_pages.len() as PageIndex)?; + let _ = BaseMiner::::check_feasibility( + &paged_solution, + &voter_pages, + &all_targets, + desired_targets, + )?; + } + + Ok(()) + } + + /// Attempt to restore a solution from cache. Otherwise, compute it fresh. 
Either way, + /// submit if our call's score is greater than that of the cached solution. + pub fn restore_or_compute_then_maybe_submit() -> Result<(), OffchainMinerError> { + sublog!( + debug, + "unsigned::ocw-miner", + "miner attempting to restore or compute an unsigned solution." + ); + + let call = Self::restore_solution() + .and_then(|(call, snapshot_fingerprint)| { + // ensure the cached call is still current before submitting + if let Call::submit_unsigned { paged_solution, .. } = &call { + // we check the snapshot fingerprint instead of doing a full feasibility. + OffchainWorkerMiner::::check_solution( + paged_solution, + Some(snapshot_fingerprint), + false, + ).map_err::, _>(Into::into)?; + Ok(call) + } else { + Err(OffchainMinerError::SolutionCallInvalid) + } + }) + .or_else::, _>(|error| { + use OffchainMinerError as OE; + use MinerError as ME; + use CommonError as CE; + match error { + OE::NoStoredSolution => { + // IFF, not present regenerate. + let call = Self::mine_checked_call()?; + Self::save_solution(&call, crate::Snapshot::::fingerprint())?; + Ok(call) + }, + OE::Common(ref e) => { + sublog!( + error, + "unsigned::ocw-miner", + "unsigned specific checks failed ({:?}) while restoring solution. This should never happen. clearing cache.", + e, + ); + Self::clear_offchain_solution_cache(); + Err(error) + }, + OE::BaseMiner(ME::Feasibility(_)) + | OE::BaseMiner(ME::Common(CE::WrongRound)) + | OE::BaseMiner(ME::Common(CE::WrongFingerprint)) + => { + // note that failing `Feasibility` can only mean that the solution was + // computed over a snapshot that has changed due to a fork. + sublog!(warn, "unsigned::ocw-miner", "wiping infeasible solution ({:?}).", error); + // kill the "bad" solution. + Self::clear_offchain_solution_cache(); + + // .. then return the error as-is. + Err(error) + }, + _ => { + sublog!(debug, "unsigned::ocw-miner", "unhandled error in restoring offchain solution {:?}", error); + // nothing to do. Return the error as-is. 
+ Err(error) + }, + } + })?; + + Self::submit_call(call) + } + + /// Checks if an execution of the offchain worker is permitted at the given block number, or + /// not. + /// + /// This makes sure that + /// 1. we don't run on previous blocks in case of a re-org + /// 2. we don't run twice within a window of length `T::OffchainRepeat`. + /// + /// Returns `Ok(())` if offchain worker limit is respected, `Err(reason)` otherwise. If + /// `Ok()` is returned, `now` is written in storage and will be used in further calls as the + /// baseline. + pub fn ensure_offchain_repeat_frequency( + now: BlockNumberFor, + ) -> Result<(), OffchainMinerError> { + let threshold = T::OffchainRepeat::get(); + let last_block = StorageValueRef::persistent(&Self::OFFCHAIN_LAST_BLOCK); + + let mutate_stat = last_block.mutate::<_, &'static str, _>( + |maybe_head: Result>, _>| { + match maybe_head { + Ok(Some(head)) if now < head => Err("fork."), + Ok(Some(head)) if now >= head && now <= head + threshold => + Err("recently executed."), + Ok(Some(head)) if now > head + threshold => { + // we can run again now. Write the new head. + Ok(now) + }, + _ => { + // value doesn't exists. Probably this node just booted up. Write, and + // run + Ok(now) + }, + } + }, + ); + + match mutate_stat { + // all good + Ok(_) => Ok(()), + // failed to write. + Err(MutateStorageError::ConcurrentModification(_)) => Err(OffchainMinerError::Lock( + "failed to write to offchain db (concurrent modification).", + )), + // fork etc. + Err(MutateStorageError::ValueFunctionFailed(why)) => Err(OffchainMinerError::Lock(why)), + } + } + + /// Save a given call into OCW storage. 
+ fn save_solution( + call: &Call, + snapshot_fingerprint: T::Hash, + ) -> Result<(), OffchainMinerError> { + sublog!(debug, "unsigned::ocw-miner", "saving a call to the offchain storage."); + let storage = StorageValueRef::persistent(&Self::OFFCHAIN_CACHED_CALL); + match storage.mutate::<_, (), _>(|_| Ok((call.clone(), snapshot_fingerprint))) { + Ok(_) => Ok(()), + Err(MutateStorageError::ConcurrentModification(_)) => + Err(OffchainMinerError::FailedToStoreSolution), + Err(MutateStorageError::ValueFunctionFailed(_)) => { + // this branch should be unreachable according to the definition of + // `StorageValueRef::mutate`: that function should only ever `Err` if the closure we + // pass it returns an error. however, for safety in case the definition changes, we + // do not optimize the branch away or panic. + Err(OffchainMinerError::FailedToStoreSolution) + }, + } + } + + /// Get a saved solution from OCW storage if it exists. + fn restore_solution() -> Result<(Call, T::Hash), OffchainMinerError> { + StorageValueRef::persistent(&Self::OFFCHAIN_CACHED_CALL) + .get() + .ok() + .flatten() + .ok_or(OffchainMinerError::NoStoredSolution) + } + + /// Clear a saved solution from OCW storage. + fn clear_offchain_solution_cache() { + sublog!(debug, "unsigned::ocw-miner", "clearing offchain call cache storage."); + let mut storage = StorageValueRef::persistent(&Self::OFFCHAIN_CACHED_CALL); + storage.clear(); + } + + #[cfg(test)] + fn cached_solution() -> Option> { + StorageValueRef::persistent(&Self::OFFCHAIN_CACHED_CALL) + .get::>() + .unwrap() + } +} + +// This will only focus on testing the internals of `maybe_trim_weight_and_len_works`. 
+#[cfg(test)] +mod trimming { + use super::*; + use crate::{mock::*, verifier::Verifier}; + use frame_election_provider_support::TryFromUnboundedPagedSupports; + use sp_npos_elections::Support; + + #[test] + fn solution_without_any_trimming() { + ExtBuilder::unsigned().build_and_execute(|| { + // adjust the voters a bit, such that they are all different backings + let mut current_voters = Voters::get(); + current_voters.iter_mut().for_each(|(who, stake, ..)| *stake = *who); + Voters::set(current_voters); + + roll_to_snapshot_created(); + + // now we let the miner mine something for us.. + let solution = mine_full_solution().unwrap(); + assert_eq!( + solution.solution_pages.iter().map(|page| page.voter_count()).sum::(), + 8 + ); + + assert_eq!(solution.solution_pages.encoded_size(), 105); + + load_mock_signed_and_start(solution); + let supports = roll_to_full_verification(); + + // a solution is queued. + assert!(VerifierPallet::queued_score().is_some()); + + assert_eq!( + supports, + vec![ + vec![ + (30, Support { total: 30, voters: vec![(30, 30)] }), + (40, Support { total: 40, voters: vec![(40, 40)] }) + ], + vec![ + (30, Support { total: 11, voters: vec![(7, 7), (5, 2), (6, 2)] }), + (40, Support { total: 7, voters: vec![(5, 3), (6, 4)] }) + ], + vec![(40, Support { total: 9, voters: vec![(2, 2), (3, 3), (4, 4)] })] + ] + .try_from_unbounded_paged() + .unwrap() + ); + }) + } + + #[test] + fn trim_length() { + ExtBuilder::unsigned().miner_max_length(104).build_and_execute(|| { + // adjust the voters a bit, such that they are all different backings + let mut current_voters = Voters::get(); + current_voters.iter_mut().for_each(|(who, stake, ..)| *stake = *who); + Voters::set(current_voters); + + roll_to_snapshot_created(); + ensure_voters(3, 12); + + let solution = mine_full_solution().unwrap(); + + assert_eq!( + solution.solution_pages.iter().map(|page| page.voter_count()).sum::(), + 7 + ); + + assert_eq!(solution.solution_pages.encoded_size(), 99); + + 
load_mock_signed_and_start(solution); + let supports = roll_to_full_verification(); + + // a solution is queued. + assert!(VerifierPallet::queued_score().is_some()); + + assert_eq!( + supports, + vec![ + // 30 is gone! Note that length trimming starts from lsp, so we trim from this + // page only. + vec![(40, Support { total: 40, voters: vec![(40, 40)] })], + vec![ + (30, Support { total: 11, voters: vec![(7, 7), (5, 2), (6, 2)] }), + (40, Support { total: 7, voters: vec![(5, 3), (6, 4)] }) + ], + vec![(40, Support { total: 9, voters: vec![(2, 2), (3, 3), (4, 4)] })] + ] + .try_from_unbounded_paged() + .unwrap() + ); + }); + } + + #[test] + fn trim_length_2() { + ExtBuilder::unsigned().miner_max_length(98).build_and_execute(|| { + // adjust the voters a bit, such that they are all different backings + let mut current_voters = Voters::get(); + current_voters.iter_mut().for_each(|(who, stake, ..)| *stake = *who); + Voters::set(current_voters); + + roll_to_snapshot_created(); + ensure_voters(3, 12); + + let solution = mine_full_solution().unwrap(); + + assert_eq!( + solution.solution_pages.iter().map(|page| page.voter_count()).sum::(), + 6 + ); + + assert_eq!(solution.solution_pages.encoded_size(), 93); + + load_mock_signed_and_start(solution); + let supports = roll_to_full_verification(); + + // a solution is queued. 
+ assert!(VerifierPallet::queued_score().is_some()); + + assert_eq!( + supports, + vec![ + vec![], + vec![ + (30, Support { total: 11, voters: vec![(7, 7), (5, 2), (6, 2)] }), + (40, Support { total: 7, voters: vec![(5, 3), (6, 4)] }) + ], + vec![(40, Support { total: 9, voters: vec![(2, 2), (3, 3), (4, 4)] })] + ] + .try_from_unbounded_paged() + .unwrap() + ); + }); + } + + #[test] + fn trim_length_3() { + ExtBuilder::unsigned().miner_max_length(92).build_and_execute(|| { + // adjust the voters a bit, such that they are all different backings + let mut current_voters = Voters::get(); + current_voters.iter_mut().for_each(|(who, stake, ..)| *stake = *who); + Voters::set(current_voters); + + roll_to_snapshot_created(); + ensure_voters(3, 12); + + let solution = mine_full_solution().unwrap(); + + assert_eq!( + solution.solution_pages.iter().map(|page| page.voter_count()).sum::(), + 5 + ); + + assert_eq!(solution.solution_pages.encoded_size(), 83); + + load_mock_signed_and_start(solution); + let supports = roll_to_full_verification(); + + // a solution is queued. 
+ assert!(VerifierPallet::queued_score().is_some()); + + assert_eq!( + dbg!(supports), + vec![ + vec![], + vec![ + (30, Support { total: 9, voters: vec![(7, 7), (6, 2)] }), + (40, Support { total: 4, voters: vec![(6, 4)] }) + ], + vec![(40, Support { total: 9, voters: vec![(2, 2), (3, 3), (4, 4)] })] + ] + .try_from_unbounded_paged() + .unwrap() + ); + }); + } + + #[test] + fn trim_backers_per_page_works() { + ExtBuilder::unsigned().max_backers_per_winner(2).build_and_execute(|| { + // adjust the voters a bit, such that they are all different backings + let mut current_voters = Voters::get(); + current_voters.iter_mut().for_each(|(who, stake, ..)| *stake = *who); + Voters::set(current_voters); + + roll_to_snapshot_created(); + ensure_voters(3, 12); + + let solution = mine_full_solution().unwrap(); + + load_mock_signed_and_start(solution); + let supports = roll_to_full_verification(); + + // a solution is queued. + assert!(VerifierPallet::queued_score().is_some()); + + // each page is trimmed individually, based on `solution_without_any_trimming`. + assert_eq!( + supports, + vec![ + vec![ + (30, Support { total: 30, voters: vec![(30, 30)] }), + (40, Support { total: 40, voters: vec![(40, 40)] }) + ], + vec![ + (30, Support { total: 9, voters: vec![(7, 7), (6, 2)] }), + (40, Support { total: 9, voters: vec![(5, 5), (6, 4)] }) /* notice how + * 5's stake is + * re-distributed + * all here ^^ */ + ], + vec![(40, Support { total: 7, voters: vec![(3, 3), (4, 4)] })] + ] + .try_from_unbounded_paged() + .unwrap() + ); + }) + } + + #[test] + fn trim_backers_per_page_works_2() { + // This one is more interesting, as it also shows that as we trim backers, we re-distribute + // their weight elsewhere. 
+ ExtBuilder::unsigned().max_backers_per_winner(1).build_and_execute(|| { + // adjust the voters a bit, such that they are all different backings + let mut current_voters = Voters::get(); + current_voters.iter_mut().for_each(|(who, stake, ..)| *stake = *who); + Voters::set(current_voters); + + roll_to_snapshot_created(); + ensure_voters(3, 12); + + let solution = mine_full_solution().unwrap(); + + load_mock_signed_and_start(solution); + let supports = roll_to_full_verification(); + + // a solution is queued. + assert!(VerifierPallet::queued_score().is_some()); + + // each page is trimmed individually, based on `solution_without_any_trimming`. + assert_eq!( + supports, + vec![ + vec![ + (30, Support { total: 30, voters: vec![(30, 30)] }), + (40, Support { total: 40, voters: vec![(40, 40)] }) + ], + vec![ + (30, Support { total: 7, voters: vec![(7, 7)] }), + (40, Support { total: 6, voters: vec![(6, 6)] }) + ], + vec![(40, Support { total: 4, voters: vec![(4, 4)] })] + ] + .try_from_unbounded_paged() + .unwrap() + ); + }) + } + + #[test] + fn trim_backers_final_works() { + ExtBuilder::unsigned().max_backers_per_winner_final(4).build_and_execute(|| { + // adjust the voters a bit, such that they are all different backings + let mut current_voters = Voters::get(); + current_voters.iter_mut().for_each(|(who, stake, ..)| *stake = *who); + Voters::set(current_voters); + + roll_to_snapshot_created(); + ensure_voters(3, 12); + + let solution = mine_full_solution().unwrap(); + + load_mock_signed_and_start(solution); + let supports = roll_to_full_verification(); + + // a solution is queued. 
+ assert!(VerifierPallet::queued_score().is_some()); + + // 30 has 1 + 3 = 4 backers -- all good + // 40 has 1 + 2 + 3 = 6 backers -- needs to lose 2 + assert_eq!( + supports, + vec![ + vec![ + (30, Support { total: 30, voters: vec![(30, 30)] }), + (40, Support { total: 40, voters: vec![(40, 40)] }) + ], + vec![ + (30, Support { total: 14, voters: vec![(5, 5), (7, 7), (6, 2)] }), + (40, Support { total: 4, voters: vec![(6, 4)] }) + ], + vec![(40, Support { total: 7, voters: vec![(3, 3), (4, 4)] })] + ] + .try_from_unbounded_paged() + .unwrap() + ); + }) + } + + #[test] + fn trim_backers_per_page_and_final_works() { + ExtBuilder::unsigned() + .max_backers_per_winner_final(4) + .max_backers_per_winner(2) + .build_and_execute(|| { + // adjust the voters a bit, such that they are all different backings + let mut current_voters = Voters::get(); + current_voters.iter_mut().for_each(|(who, stake, ..)| *stake = *who); + Voters::set(current_voters); + + roll_to_snapshot_created(); + ensure_voters(3, 12); + + let solution = mine_full_solution().unwrap(); + + load_mock_signed_and_start(solution); + let supports = roll_to_full_verification(); + + // a solution is queued. + assert!(VerifierPallet::queued_score().is_some()); + + // each page is trimmed individually, based on `solution_without_any_trimming`. 
+ assert_eq!( + supports, + vec![ + vec![ + (30, Support { total: 30, voters: vec![(30, 30)] }), + (40, Support { total: 40, voters: vec![(40, 40)] }) + ], + vec![ + (30, Support { total: 12, voters: vec![(5, 5), (7, 7)] }), + (40, Support { total: 6, voters: vec![(6, 6)] }) + ], + vec![(40, Support { total: 7, voters: vec![(3, 3), (4, 4)] })] + ] + .try_from_unbounded_paged() + .unwrap() + ); + }) + } +} + +#[cfg(test)] +mod base_miner { + use std::vec; + + use super::*; + use crate::{mock::*, Snapshot}; + use frame_election_provider_support::TryFromUnboundedPagedSupports; + use sp_npos_elections::Support; + use sp_runtime::PerU16; + + #[test] + fn pagination_does_not_affect_score() { + let score_1 = ExtBuilder::unsigned() + .pages(1) + .voter_per_page(12) + .build_unchecked() + .execute_with(|| { + roll_to_snapshot_created(); + mine_full_solution().unwrap().score + }); + let score_2 = ExtBuilder::unsigned() + .pages(2) + .voter_per_page(6) + .build_unchecked() + .execute_with(|| { + roll_to_snapshot_created(); + mine_full_solution().unwrap().score + }); + let score_3 = ExtBuilder::unsigned() + .pages(3) + .voter_per_page(4) + .build_unchecked() + .execute_with(|| { + roll_to_snapshot_created(); + mine_full_solution().unwrap().score + }); + + assert_eq!(score_1, score_2); + assert_eq!(score_2, score_3); + } + + #[test] + fn mine_solution_single_page_works() { + ExtBuilder::unsigned().pages(1).voter_per_page(8).build_and_execute(|| { + roll_to_snapshot_created(); + + ensure_voters(1, 8); + ensure_targets(1, 4); + + assert_eq!( + Snapshot::::voters(0) + .unwrap() + .into_iter() + .map(|(x, _, _)| x) + .collect::>(), + vec![1, 2, 3, 4, 5, 6, 7, 8] + ); + + let paged = mine_full_solution().unwrap(); + assert_eq!(paged.solution_pages.len(), 1); + + // this solution must be feasible and submittable. 
+ OffchainWorkerMiner::::base_check_solution(&paged, None, true).unwrap(); + + // now do a realistic full verification + load_mock_signed_and_start(paged.clone()); + let supports = roll_to_full_verification(); + + assert_eq!( + supports, + vec![vec![ + (10, Support { total: 30, voters: vec![(1, 10), (8, 10), (4, 5), (5, 5)] }), + ( + 40, + Support { + total: 40, + voters: vec![(2, 10), (3, 10), (6, 10), (4, 5), (5, 5)] + } + ) + ]] + .try_from_unbounded_paged() + .unwrap() + ); + + // NOTE: this is the same as the score of any other test that contains the first 8 + // voters, we already test for this in `pagination_does_not_affect_score`. + assert_eq!( + paged.score, + ElectionScore { minimal_stake: 30, sum_stake: 70, sum_stake_squared: 2500 } + ); + }) + } + + #[test] + fn mine_solution_double_page_works() { + ExtBuilder::unsigned().pages(2).voter_per_page(4).build_and_execute(|| { + roll_to_snapshot_created(); + + // 2 pages of 8 voters + ensure_voters(2, 8); + // 1 page of 4 targets + ensure_targets(1, 4); + + // voters in pages. note the reverse page index. + assert_eq!( + Snapshot::::voters(0) + .unwrap() + .into_iter() + .map(|(x, _, _)| x) + .collect::>(), + vec![5, 6, 7, 8] + ); + assert_eq!( + Snapshot::::voters(1) + .unwrap() + .into_iter() + .map(|(x, _, _)| x) + .collect::>(), + vec![1, 2, 3, 4] + ); + // targets in pages. + assert_eq!(Snapshot::::targets().unwrap(), vec![10, 20, 30, 40]); + let paged = mine_full_solution().unwrap(); + + assert_eq!( + paged.solution_pages, + vec![ + TestNposSolution { + // voter 6 (index 1) is backing 40 (index 3). 
+ // voter 8 (index 3) is backing 10 (index 0) + votes1: vec![(1, 3), (3, 0)], + // voter 5 (index 0) is backing 40 (index 10) and 10 (index 0) + votes2: vec![(0, [(0, PerU16::from_parts(32768))], 3)], + ..Default::default() + }, + TestNposSolution { + // voter 1 (index 0) is backing 10 (index 0) + // voter 2 (index 1) is backing 40 (index 3) + // voter 3 (index 2) is backing 40 (index 3) + votes1: vec![(0, 0), (1, 3), (2, 3)], + // voter 4 (index 3) is backing 40 (index 10) and 10 (index 0) + votes2: vec![(3, [(0, PerU16::from_parts(32768))], 3)], + ..Default::default() + }, + ] + ); + + // this solution must be feasible and submittable. + OffchainWorkerMiner::::base_check_solution(&paged, None, false).unwrap(); + + // it must also be verified in the verifier + load_mock_signed_and_start(paged.clone()); + let supports = roll_to_full_verification(); + + assert_eq!( + supports, + vec![ + // page0, supports from voters 5, 6, 7, 8 + vec![ + (10, Support { total: 15, voters: vec![(8, 10), (5, 5)] }), + (40, Support { total: 15, voters: vec![(6, 10), (5, 5)] }) + ], + // page1 supports from voters 1, 2, 3, 4 + vec![ + (10, Support { total: 15, voters: vec![(1, 10), (4, 5)] }), + (40, Support { total: 25, voters: vec![(2, 10), (3, 10), (4, 5)] }) + ] + ] + .try_from_unbounded_paged() + .unwrap() + ); + + assert_eq!( + paged.score, + ElectionScore { minimal_stake: 30, sum_stake: 70, sum_stake_squared: 2500 } + ); + }) + } + + #[test] + fn mine_solution_triple_page_works() { + ExtBuilder::unsigned().pages(3).voter_per_page(4).build_and_execute(|| { + roll_to_snapshot_created(); + + ensure_voters(3, 12); + ensure_targets(1, 4); + + // voters in pages. note the reverse page index. 
+ assert_eq!( + Snapshot::::voters(2) + .unwrap() + .into_iter() + .map(|(x, _, _)| x) + .collect::>(), + vec![1, 2, 3, 4] + ); + assert_eq!( + Snapshot::::voters(1) + .unwrap() + .into_iter() + .map(|(x, _, _)| x) + .collect::>(), + vec![5, 6, 7, 8] + ); + assert_eq!( + Snapshot::::voters(0) + .unwrap() + .into_iter() + .map(|(x, _, _)| x) + .collect::>(), + vec![10, 20, 30, 40] + ); + + let paged = mine_full_solution().unwrap(); + assert_eq!( + paged.solution_pages, + vec![ + TestNposSolution { votes1: vec![(2, 2), (3, 3)], ..Default::default() }, + TestNposSolution { + votes1: vec![(2, 2)], + votes2: vec![ + (0, [(2, PerU16::from_parts(32768))], 3), + (1, [(2, PerU16::from_parts(32768))], 3) + ], + ..Default::default() + }, + TestNposSolution { + votes1: vec![(2, 3), (3, 3)], + votes2: vec![(1, [(2, PerU16::from_parts(32768))], 3)], + ..Default::default() + }, + ] + ); + + // this solution must be feasible and submittable. + OffchainWorkerMiner::::base_check_solution(&paged, None, true).unwrap(); + // now do a realistic full verification + load_mock_signed_and_start(paged.clone()); + let supports = roll_to_full_verification(); + + assert_eq!( + supports, + vec![ + // page 0: self-votes. 
+ vec![ + (30, Support { total: 30, voters: vec![(30, 30)] }), + (40, Support { total: 40, voters: vec![(40, 40)] }) + ], + // page 1: 5, 6, 7, 8 + vec![ + (30, Support { total: 20, voters: vec![(7, 10), (5, 5), (6, 5)] }), + (40, Support { total: 10, voters: vec![(5, 5), (6, 5)] }) + ], + // page 2: 1, 2, 3, 4 + vec![ + (30, Support { total: 5, voters: vec![(2, 5)] }), + (40, Support { total: 25, voters: vec![(3, 10), (4, 10), (2, 5)] }) + ] + ] + .try_from_unbounded_paged() + .unwrap() + ); + + assert_eq!( + paged.score, + ElectionScore { minimal_stake: 55, sum_stake: 130, sum_stake_squared: 8650 } + ); + }) + } + + #[test] + fn mine_solution_choses_most_significant_pages() { + ExtBuilder::unsigned().pages(2).voter_per_page(4).build_and_execute(|| { + roll_to_snapshot_created(); + + ensure_voters(2, 8); + ensure_targets(1, 4); + + // these folks should be ignored safely. + assert_eq!( + Snapshot::::voters(0) + .unwrap() + .into_iter() + .map(|(x, _, _)| x) + .collect::>(), + vec![5, 6, 7, 8] + ); + // voters in pages 1, this is the most significant page. + assert_eq!( + Snapshot::::voters(1) + .unwrap() + .into_iter() + .map(|(x, _, _)| x) + .collect::>(), + vec![1, 2, 3, 4] + ); + + // now we ask for just 1 page of solution. + let paged = mine_solution(1).unwrap(); + + assert_eq!( + paged.solution_pages, + vec![TestNposSolution { + // voter 1 (index 0) is backing 10 (index 0) + // voter 2 (index 1) is backing 40 (index 3) + // voter 3 (index 2) is backing 40 (index 3) + votes1: vec![(0, 0), (1, 3), (2, 3)], + // voter 4 (index 3) is backing 40 (index 10) and 10 (index 0) + votes2: vec![(3, [(0, PerU16::from_parts(32768))], 3)], + ..Default::default() + }] + ); + + // this solution must be feasible and submittable. + OffchainWorkerMiner::::base_check_solution(&paged, None, true).unwrap(); + // now do a realistic full verification. 
+ load_mock_signed_and_start(paged.clone()); + let supports = roll_to_full_verification(); + + assert_eq!( + supports, + vec![ + // page0: non existent. + vec![], + // page1 supports from voters 1, 2, 3, 4 + vec![ + (10, Support { total: 15, voters: vec![(1, 10), (4, 5)] }), + (40, Support { total: 25, voters: vec![(2, 10), (3, 10), (4, 5)] }) + ] + ] + .try_from_unbounded_paged() + .unwrap() + ); + + assert_eq!( + paged.score, + ElectionScore { minimal_stake: 15, sum_stake: 40, sum_stake_squared: 850 } + ); + }) + } + + #[test] + fn mine_solution_2_out_of_3_pages() { + ExtBuilder::unsigned().pages(3).voter_per_page(4).build_and_execute(|| { + roll_to_snapshot_created(); + + ensure_voters(3, 12); + ensure_targets(1, 4); + + assert_eq!( + Snapshot::::voters(0) + .unwrap() + .into_iter() + .map(|(x, _, _)| x) + .collect::>(), + vec![10, 20, 30, 40] + ); + assert_eq!( + Snapshot::::voters(1) + .unwrap() + .into_iter() + .map(|(x, _, _)| x) + .collect::>(), + vec![5, 6, 7, 8] + ); + assert_eq!( + Snapshot::::voters(2) + .unwrap() + .into_iter() + .map(|(x, _, _)| x) + .collect::>(), + vec![1, 2, 3, 4] + ); + + // now we ask for just 1 page of solution. + let paged = mine_solution(2).unwrap(); + + // this solution must be feasible and submittable. 
+ OffchainWorkerMiner::::base_check_solution(&paged, None, true).unwrap(); + + assert_eq!( + paged.solution_pages, + vec![ + // this can be "pagified" to snapshot at index 1, which contains 5, 6, 7, 8 + // in which: + // 6 (index:1) votes for 40 (index:3) + // 8 (index:1) votes for 10 (index:0) + // 5 votes for both 10 and 40 + TestNposSolution { + votes1: vec![(1, 3), (3, 0)], + votes2: vec![(0, [(0, PerU16::from_parts(32768))], 3)], + ..Default::default() + }, + // this can be 'pagified" to snapshot at index 2, which contains 1, 2, 3, 4 + // in which: + // 1 (index:0) votes for 10 (index:0) + // 2 (index:1) votes for 40 (index:3) + // 3 (index:2) votes for 40 (index:3) + // 4 votes for both 10 and 40 + TestNposSolution { + votes1: vec![(0, 0), (1, 3), (2, 3)], + votes2: vec![(3, [(0, PerU16::from_parts(32768))], 3)], + ..Default::default() + } + ] + ); + + // this solution must be feasible and submittable. + OffchainWorkerMiner::::base_check_solution(&paged, None, true).unwrap(); + // now do a realistic full verification. + load_mock_signed_and_start(paged.clone()); + let supports = roll_to_full_verification(); + + assert_eq!( + supports, + vec![ + // empty page 0. 
+ vec![], + // supports from voters 5, 6, 7, 8 + vec![ + (10, Support { total: 15, voters: vec![(8, 10), (5, 5)] }), + (40, Support { total: 15, voters: vec![(6, 10), (5, 5)] }) + ], + // supports from voters 1, 2, 3, 4 + vec![ + (10, Support { total: 15, voters: vec![(1, 10), (4, 5)] }), + (40, Support { total: 25, voters: vec![(2, 10), (3, 10), (4, 5)] }) + ] + ] + .try_from_unbounded_paged() + .unwrap() + ); + + assert_eq!( + paged.score, + ElectionScore { minimal_stake: 30, sum_stake: 70, sum_stake_squared: 2500 } + ); + }) + } + + #[test] + fn can_reduce_solution() { + ExtBuilder::unsigned().build_and_execute(|| { + roll_to_snapshot_created(); + let full_edges = OffchainWorkerMiner::::mine_solution(Pages::get(), false) + .unwrap() + .solution_pages + .iter() + .fold(0, |acc, x| acc + x.edge_count()); + let reduced_edges = OffchainWorkerMiner::::mine_solution(Pages::get(), true) + .unwrap() + .solution_pages + .iter() + .fold(0, |acc, x| acc + x.edge_count()); + + assert!(reduced_edges < full_edges, "{} < {} not fulfilled", reduced_edges, full_edges); + }) + } +} + +#[cfg(test)] +mod offchain_worker_miner { + use crate::{verifier::Verifier, CommonError}; + use frame_support::traits::Hooks; + use sp_runtime::offchain::storage_lock::{BlockAndTime, StorageLock}; + + use super::*; + use crate::mock::*; + + #[test] + fn lock_prevents_frequent_execution() { + let (mut ext, _) = ExtBuilder::unsigned().build_offchainify(); + ext.execute_with_sanity_checks(|| { + let offchain_repeat = ::OffchainRepeat::get(); + + // first execution -- okay. + assert!(OffchainWorkerMiner::::ensure_offchain_repeat_frequency(25).is_ok()); + + // next block: rejected. 
+ assert_noop!( + OffchainWorkerMiner::::ensure_offchain_repeat_frequency(26), + OffchainMinerError::Lock("recently executed.") + ); + + // allowed after `OFFCHAIN_REPEAT` + assert!(OffchainWorkerMiner::::ensure_offchain_repeat_frequency( + (26 + offchain_repeat).into() + ) + .is_ok()); + + // a fork like situation: re-execute last 3. + assert!(OffchainWorkerMiner::::ensure_offchain_repeat_frequency( + (26 + offchain_repeat - 3).into() + ) + .is_err()); + assert!(OffchainWorkerMiner::::ensure_offchain_repeat_frequency( + (26 + offchain_repeat - 2).into() + ) + .is_err()); + assert!(OffchainWorkerMiner::::ensure_offchain_repeat_frequency( + (26 + offchain_repeat - 1).into() + ) + .is_err()); + }) + } + + #[test] + fn lock_released_after_successful_execution() { + // first, ensure that a successful execution releases the lock + let (mut ext, pool) = ExtBuilder::unsigned().build_offchainify(); + ext.execute_with_sanity_checks(|| { + let guard = StorageValueRef::persistent(&OffchainWorkerMiner::::OFFCHAIN_LOCK); + let last_block = + StorageValueRef::persistent(&OffchainWorkerMiner::::OFFCHAIN_LAST_BLOCK); + + roll_to_unsigned_open(); + + // initially, the lock is not set. + assert!(guard.get::().unwrap().is_none()); + + // a successful a-z execution. + UnsignedPallet::offchain_worker(25); + assert_eq!(pool.read().transactions.len(), 1); + + // afterwards, the lock is not set either.. + assert!(guard.get::().unwrap().is_none()); + assert_eq!(last_block.get::().unwrap(), Some(25)); + }); + } + + #[test] + fn lock_prevents_overlapping_execution() { + // ensure that if the guard is in hold, a new execution is not allowed. + let (mut ext, pool) = ExtBuilder::unsigned().build_offchainify(); + ext.execute_with_sanity_checks(|| { + roll_to_unsigned_open(); + + // artificially set the value, as if another thread is mid-way. 
+ let mut lock = StorageLock::>::with_block_deadline( + OffchainWorkerMiner::::OFFCHAIN_LOCK, + UnsignedPhase::get().saturated_into(), + ); + let guard = lock.lock(); + + // nothing submitted. + UnsignedPallet::offchain_worker(25); + assert_eq!(pool.read().transactions.len(), 0); + UnsignedPallet::offchain_worker(26); + assert_eq!(pool.read().transactions.len(), 0); + + drop(guard); + + // 🎉 ! + UnsignedPallet::offchain_worker(25); + assert_eq!(pool.read().transactions.len(), 1); + }); + } + + #[test] + fn initial_ocw_runs_and_saves_new_cache() { + let (mut ext, pool) = ExtBuilder::unsigned().build_offchainify(); + ext.execute_with_sanity_checks(|| { + roll_to_unsigned_open(); + + let last_block = + StorageValueRef::persistent(&OffchainWorkerMiner::::OFFCHAIN_LAST_BLOCK); + let cache = + StorageValueRef::persistent(&OffchainWorkerMiner::::OFFCHAIN_CACHED_CALL); + + assert_eq!(last_block.get::(), Ok(None)); + assert_eq!(cache.get::>(), Ok(None)); + + // creates, caches, submits without expecting previous cache value + UnsignedPallet::offchain_worker(25); + assert_eq!(pool.read().transactions.len(), 1); + + assert_eq!(last_block.get::(), Ok(Some(25))); + assert!(matches!(cache.get::>(), Ok(Some(_)))); + }) + } + + #[test] + fn ocw_pool_submission_works() { + let (mut ext, pool) = ExtBuilder::unsigned().build_offchainify(); + ext.execute_with_sanity_checks(|| { + roll_to_unsigned_open(); + + roll_next_with_ocw(Some(pool.clone())); + // OCW must have submitted now + + let encoded = pool.read().transactions[0].clone(); + let extrinsic: Extrinsic = codec::Decode::decode(&mut &*encoded).unwrap(); + let call = extrinsic.function; + assert!(matches!( + call, + crate::mock::RuntimeCall::UnsignedPallet( + crate::unsigned::Call::submit_unsigned { .. 
} + ) + )); + }) + } + + #[test] + fn resubmits_after_offchain_repeat() { + let (mut ext, pool) = ExtBuilder::unsigned().build_offchainify(); + ext.execute_with_sanity_checks(|| { + let offchain_repeat = ::OffchainRepeat::get(); + roll_to_unsigned_open(); + + assert!(OffchainWorkerMiner::::cached_solution().is_none()); + // creates, caches, submits without expecting previous cache value + UnsignedPallet::offchain_worker(25); + assert_eq!(pool.read().transactions.len(), 1); + let tx_cache = pool.read().transactions[0].clone(); + // assume that the tx has been processed + pool.try_write().unwrap().transactions.clear(); + + // attempts to resubmit the tx after the threshold has expired. + UnsignedPallet::offchain_worker(25 + 1 + offchain_repeat); + assert_eq!(pool.read().transactions.len(), 1); + + // resubmitted tx is identical to first submission + let tx = &pool.read().transactions[0]; + assert_eq!(&tx_cache, tx); + }) + } + + #[test] + fn regenerates_and_resubmits_after_offchain_repeat_if_no_cache() { + let (mut ext, pool) = ExtBuilder::unsigned().build_offchainify(); + ext.execute_with_sanity_checks(|| { + let offchain_repeat = ::OffchainRepeat::get(); + roll_to_unsigned_open(); + + assert!(OffchainWorkerMiner::::cached_solution().is_none()); + // creates, caches, submits without expecting previous cache value. + UnsignedPallet::offchain_worker(25); + assert_eq!(pool.read().transactions.len(), 1); + let tx_cache = pool.read().transactions[0].clone(); + // assume that the tx has been processed + pool.try_write().unwrap().transactions.clear(); + + // remove the cached submitted tx. 
+ // this ensures that when the resubmit window rolls around, we're ready to regenerate + // from scratch if necessary + let mut call_cache = + StorageValueRef::persistent(&OffchainWorkerMiner::::OFFCHAIN_CACHED_CALL); + assert!(matches!(call_cache.get::>(), Ok(Some(_)))); + call_cache.clear(); + + // attempts to resubmit the tx after the threshold has expired + UnsignedPallet::offchain_worker(25 + 1 + offchain_repeat); + assert_eq!(pool.read().transactions.len(), 1); + + // resubmitted tx is identical to first submission + let tx = &pool.read().transactions[0]; + assert_eq!(&tx_cache, tx); + }) + } + + #[test] + fn altering_snapshot_invalidates_solution_cache() { + // by infeasible, we mean here that if the snapshot fingerprint has changed. + let (mut ext, pool) = ExtBuilder::unsigned().unsigned_phase(999).build_offchainify(); + ext.execute_with_sanity_checks(|| { + let offchain_repeat = ::OffchainRepeat::get(); + roll_to_unsigned_open(); + roll_next_with_ocw(None); + + // something is submitted.. + assert_eq!(pool.read().transactions.len(), 1); + pool.try_write().unwrap().transactions.clear(); + + // ..and cached + let call_cache = + StorageValueRef::persistent(&OffchainWorkerMiner::::OFFCHAIN_CACHED_CALL); + assert!(matches!(call_cache.get::>(), Ok(Some(_)))); + + // now change the snapshot, ofc this is rare in reality. This makes the cached call + // infeasible. + assert_eq!(crate::Snapshot::::targets().unwrap(), vec![10, 20, 30, 40]); + let pre_fingerprint = crate::Snapshot::::fingerprint(); + crate::Snapshot::::remove_target(0); + let post_fingerprint = crate::Snapshot::::fingerprint(); + assert_eq!(crate::Snapshot::::targets().unwrap(), vec![20, 30, 40]); + assert_ne!(pre_fingerprint, post_fingerprint); + + // now run ocw again + let now = System::block_number(); + roll_to_with_ocw(now + offchain_repeat + 1, None); + // nothing is submitted this time.. + assert_eq!(pool.read().transactions.len(), 0); + // .. and the cache is gone. 
+ assert_eq!(call_cache.get::>(), Ok(None)); + + // upon the next run, we re-generate and submit something fresh again. + roll_to_with_ocw(now + offchain_repeat + offchain_repeat + 2, None); + assert_eq!(pool.read().transactions.len(), 1); + assert!(matches!(call_cache.get::>(), Ok(Some(_)))); + }) + } + + #[test] + fn wont_resubmit_if_weak_score() { + // common case, if the score is weak, don't bother with anything, ideally check from the + // logs that we don't run feasibility in this call path. Score check must come before. + let (mut ext, pool) = ExtBuilder::unsigned().unsigned_phase(999).build_offchainify(); + ext.execute_with_sanity_checks(|| { + let offchain_repeat = ::OffchainRepeat::get(); + // unfortunately there's no pretty way to run the ocw code such that it generates a + // weak, but correct solution. We just write it to cache directly. + roll_to_unsigned_open(); + roll_next_with_ocw(None); + + // something is submitted.. + assert_eq!(pool.read().transactions.len(), 1); + + // ..and cached + let call_cache = + StorageValueRef::persistent(&OffchainWorkerMiner::::OFFCHAIN_CACHED_CALL); + assert!(matches!(call_cache.get::>(), Ok(Some(_)))); + + // and replace it with something weak. + let weak_solution = raw_paged_from_supports( + vec![vec![(40, Support { total: 10, voters: vec![(3, 10)] })]], + 0, + ); + let weak_call = crate::unsigned::Call::::submit_unsigned { + paged_solution: Box::new(weak_solution), + }; + call_cache.set(&weak_call); + + // run again + roll_to_with_ocw(System::block_number() + offchain_repeat + 1, Some(pool.clone())); + // nothing is submitted this time.. + assert_eq!(pool.read().transactions.len(), 0); + // .. and the cache IS STILL THERE! 
+ assert!(matches!(call_cache.get::>(), Ok(Some(_)))); + }) + } + + #[test] + fn ocw_submission_e2e_works() { + let (mut ext, pool) = ExtBuilder::unsigned().build_offchainify(); + ext.execute_with_sanity_checks(|| { + assert!(VerifierPallet::queued_score().is_none()); + roll_to_with_ocw(25 + 1, Some(pool.clone())); + assert!(VerifierPallet::queued_score().is_some()); + + // call is cached. + let call_cache = + StorageValueRef::persistent(&OffchainWorkerMiner::::OFFCHAIN_CACHED_CALL); + assert!(matches!(call_cache.get::>(), Ok(Some(_)))); + + // pool is empty + assert_eq!(pool.read().transactions.len(), 0); + }) + } + + #[test] + fn ocw_e2e_submits_and_queued_msp_only() { + let (mut ext, pool) = ExtBuilder::unsigned().build_offchainify(); + ext.execute_with_sanity_checks(|| { + // roll to mine + roll_to_unsigned_open_with_ocw(None); + // one block to verify and submit. + roll_next_with_ocw(Some(pool.clone())); + + assert_eq!( + multi_block_events(), + vec![ + crate::Event::PhaseTransitioned { + from: Phase::Off, + to: Phase::Snapshot(Pages::get()) + }, + crate::Event::PhaseTransitioned { + from: Phase::Snapshot(0), + to: Phase::Unsigned(UnsignedPhase::get() - 1) + } + ] + ); + assert_eq!( + verifier_events(), + vec![ + crate::verifier::Event::Verified(2, 2), + crate::verifier::Event::Queued( + ElectionScore { minimal_stake: 15, sum_stake: 40, sum_stake_squared: 850 }, + None + ) + ] + ); + assert!(VerifierPallet::queued_score().is_some()); + + // pool is empty + assert_eq!(pool.read().transactions.len(), 0); + }) + } + + #[test] + fn multi_page_ocw_e2e_submits_and_queued_msp_only() { + let (mut ext, pool) = ExtBuilder::unsigned().miner_pages(2).build_offchainify(); + ext.execute_with_sanity_checks(|| { + // roll to mine + roll_to_unsigned_open_with_ocw(None); + // one block to verify and submit. 
+ roll_next_with_ocw(Some(pool.clone())); + + assert_eq!( + multi_block_events(), + vec![ + crate::Event::PhaseTransitioned { + from: Phase::Off, + to: Phase::Snapshot(Pages::get()) + }, + crate::Event::PhaseTransitioned { + from: Phase::Snapshot(0), + to: Phase::Unsigned(UnsignedPhase::get() - 1) + } + ] + ); + assert_eq!( + verifier_events(), + vec![ + crate::verifier::Event::Verified(1, 2), + crate::verifier::Event::Verified(2, 2), + crate::verifier::Event::Queued( + ElectionScore { minimal_stake: 30, sum_stake: 70, sum_stake_squared: 2500 }, + None + ) + ] + ); + assert!(VerifierPallet::queued_score().is_some()); + + // pool is empty + assert_eq!(pool.read().transactions.len(), 0); + }) + } + + #[test] + fn full_multi_page_ocw_e2e_submits_and_queued_msp_only() { + let (mut ext, pool) = ExtBuilder::unsigned().miner_pages(3).build_offchainify(); + ext.execute_with_sanity_checks(|| { + // roll to mine + roll_to_unsigned_open_with_ocw(None); + // one block to verify and submit. + roll_next_with_ocw(Some(pool.clone())); + + assert_eq!( + multi_block_events(), + vec![ + crate::Event::PhaseTransitioned { + from: Phase::Off, + to: Phase::Snapshot(Pages::get()) + }, + crate::Event::PhaseTransitioned { + from: Phase::Snapshot(0), + to: Phase::Unsigned(UnsignedPhase::get() - 1) + } + ] + ); + assert_eq!( + verifier_events(), + vec![ + crate::verifier::Event::Verified(0, 2), + crate::verifier::Event::Verified(1, 2), + crate::verifier::Event::Verified(2, 2), + crate::verifier::Event::Queued( + ElectionScore { + minimal_stake: 55, + sum_stake: 130, + sum_stake_squared: 8650 + }, + None + ) + ] + ); + assert!(VerifierPallet::queued_score().is_some()); + + // pool is empty + assert_eq!(pool.read().transactions.len(), 0); + }) + } + + #[test] + fn will_not_mine_if_not_enough_winners() { + // also see `trim_weight_too_much_makes_solution_invalid`. 
+ let (mut ext, _) = ExtBuilder::unsigned().desired_targets(77).build_offchainify(); + ext.execute_with_sanity_checks(|| { + roll_to_unsigned_open(); + ensure_voters(3, 12); + + // beautiful errors, isn't it? + assert_eq!( + OffchainWorkerMiner::::mine_checked_call().unwrap_err(), + OffchainMinerError::Common(CommonError::WrongWinnerCount) + ); + }); + } + + #[test] + #[ignore] + fn multi_page_miner_on_remote_state() { + todo!(); + } +} diff --git a/substrate/frame/election-provider-multi-block/src/unsigned/mod.rs b/substrate/frame/election-provider-multi-block/src/unsigned/mod.rs new file mode 100644 index 0000000000000..18ba2370683ce --- /dev/null +++ b/substrate/frame/election-provider-multi-block/src/unsigned/mod.rs @@ -0,0 +1,625 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! ## The unsigned phase, and its miner. +//! +//! This pallet deals with unsigned submissions. These are backup, "possibly" multi-page submissions +//! from validators. +//! +//! This pallet has two miners, described in [`unsigned::miner`]. +//! +//! As it stands, a validator can, during the unsigned phase, submit up to +//! [`unsigned::Config::MinerPages`] pages. While this can be more than 1, it can likely not be a +//! full, high quality solution. This is because unsigned validator solutions are verified on the +//! fly, all within a single block. 
The exact value of this parameter should be determined by the +//! benchmarks of a runtime. +//! +//! We could implement a protocol to allow multi-block, multi-page collaborative submissions from +//! different validators, but it is not trivial. Moreover, recall that the unsigned phase is merely +//! a backup and we should primarily rely on offchain staking miners to fulfill this role during +//! `Phase::Signed`. +//! +//! ## Future Idea: Multi-Page unsigned submission +//! +//! The following is the idea of how to implement multi-page unsigned, which we don't have. +//! +//! All validators will run their miners and compute the full paginated solution. They submit all +//! pages as individual unsigned transactions to their local tx-pool. +//! +//! Upon validation, if any page is now present, the corresponding transaction is dropped. +//! +//! At each block, the first page that may be valid is included as a high priority operational +//! transaction. This page is validated on the fly to be correct. Since this transaction is sourced +//! from a validator, we can panic if they submit an invalid transaction. +//! +//! Then, once the final page is submitted, some extra checks are done, as explained in +//! [`crate::verifier`]: +//! +//! 1. bounds +//! 2. total score +//! +//! These checks might still fail. If they do, the solution is dropped. At this point, we don't know +//! which validator may have submitted a slightly-faulty solution. +//! +//! In order to prevent this, the transaction validation process always includes a check to ensure +//! all of the previous pages that have been submitted match what the local validator has computed. +//! If they match, the validator knows that they are putting skin in a game that is valid. +//! +//! If any bad pages are detected, the next validator can bail. This process means: +//! +//! * As long as all validators are honest, and run the same miner code, a correct solution is +//! found. +//!
* As little as one malicious validator can stall the process, but no one is accidentally +//! slashed, and no panic happens. +//! +//! Alternatively, we can keep track of submitters, and report a slash if it occurs. Or, if +//! the signed process is bullet-proof, we can be okay with the status quo. + +/// Export weights +pub use crate::weights::measured::pallet_election_provider_multi_block_unsigned::*; +/// Exports of this pallet +pub use pallet::*; +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; + +/// The miner. +pub mod miner; + +#[frame_support::pallet] +mod pallet { + use super::WeightInfo; + use crate::{ + types::*, + unsigned::miner::{self}, + verifier::Verifier, + CommonError, + }; + use frame_support::pallet_prelude::*; + use frame_system::{offchain::CreateInherent, pallet_prelude::*}; + use sp_runtime::traits::SaturatedConversion; + use sp_std::prelude::*; + + /// Convert a [`crate::CommonError`] to a custom InvalidTransaction with the inner code being + /// the index of the variant. + fn base_error_to_invalid(error: CommonError) -> InvalidTransaction { + let index = error.encode().pop().unwrap_or(0); + InvalidTransaction::Custom(index) + } + + pub(crate) type UnsignedWeightsOf = ::WeightInfo; + + #[pallet::config] + #[pallet::disable_frame_system_supertrait_check] + pub trait Config: crate::Config + CreateInherent> { + /// The repeat threshold of the offchain worker. + /// + /// For example, if it is 5, that means that at least 5 blocks will elapse between attempts + /// to submit the worker's solution. + type OffchainRepeat: Get>; + + /// The solver used in the offchain worker miner + type OffchainSolver: frame_election_provider_support::NposSolver< + AccountId = Self::AccountId, + >; + + /// The priority of the unsigned transaction submitted in the unsigned-phase + type MinerTxPriority: Get; + + /// The number of pages that the offchain miner will try and submit. + type MinerPages: Get; + + /// Runtime weight information of this pallet.
+ type WeightInfo: WeightInfo; + } + + #[pallet::pallet] + pub struct Pallet(PhantomData); + + #[pallet::call] + impl Pallet { + /// Submit an unsigned solution. + /// + /// This works very much like an inherent, as only the validators are permitted to submit + /// anything. By default validators will compute this call in their `offchain_worker` hook + /// and try and submit it back. + /// + /// This is different from signed page submission mainly in that the solution page is + /// verified on the fly. + /// + /// The `paged_solution` may contain at most [`Config::MinerPages`] pages. They are + /// interpreted as msp -> lsp, as per [`crate::Pallet::msp_range_for`]. + /// + /// For example, if `Pages = 4`, and `MinerPages = 2`, our full snapshot range would be [0, + /// 1, 2, 3], with 3 being msp. But, in this case, then the `paged_raw_solution.pages` is + /// expected to correspond to `[snapshot(2), snapshot(3)]`. + #[pallet::weight((UnsignedWeightsOf::::submit_unsigned(), DispatchClass::Operational))] + #[pallet::call_index(0)] + pub fn submit_unsigned( + origin: OriginFor, + paged_solution: Box>, + ) -> DispatchResultWithPostInfo { + ensure_none(origin)?; + let error_message = "Invalid unsigned submission must produce invalid block and \ + deprive validator from their authoring reward."; + + // phase, round, claimed score, page-count and hash are checked in pre-dispatch. we + // don't check them here anymore. + debug_assert!(Self::validate_unsigned_checks(&paged_solution).is_ok()); + + let claimed_score = paged_solution.score; + + // we select the most significant pages, based on `T::MinerPages`. 
+ let page_indices = crate::Pallet::::msp_range_for(T::MinerPages::get() as usize); + ::verify_synchronous_multi( + paged_solution.solution_pages.into_inner(), + page_indices, + claimed_score, + ) + .expect(error_message); + + sublog!(info, "unsigned", "queued an unsigned solution with score {:?}", claimed_score); + + Ok(None.into()) + } + } + + #[pallet::validate_unsigned] + impl ValidateUnsigned for Pallet { + type Call = Call; + fn validate_unsigned(source: TransactionSource, call: &Self::Call) -> TransactionValidity { + if let Call::submit_unsigned { paged_solution, .. } = call { + match source { + TransactionSource::Local | TransactionSource::InBlock => { /* allowed */ }, + _ => return InvalidTransaction::Call.into(), + } + + let _ = Self::validate_unsigned_checks(paged_solution.as_ref()) + .map_err(|err| { + sublog!( + debug, + "unsigned", + "unsigned transaction validation failed due to {:?}", + err + ); + err + }) + .map_err(base_error_to_invalid)?; + + ValidTransaction::with_tag_prefix("OffchainElection") + // The higher the score.minimal_stake, the better a paged_solution is. + .priority( + T::MinerTxPriority::get() + .saturating_add(paged_solution.score.minimal_stake.saturated_into()), + ) + // Used to deduplicate unsigned solutions: each validator should produce one + // paged_solution per round at most, and solutions are not propagated. + .and_provides(paged_solution.round) + // Transaction should stay in the pool for the duration of the unsigned phase. + .longevity(T::UnsignedPhase::get().saturated_into::()) + // We don't propagate this. This can never be validated at a remote node. + .propagate(false) + .build() + } else { + InvalidTransaction::Call.into() + } + } + + fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> { + if let Call::submit_unsigned { paged_solution, ..
} = call { + Self::validate_unsigned_checks(paged_solution.as_ref()) + .map_err(base_error_to_invalid) + .map_err(Into::into) + } else { + Err(InvalidTransaction::Call.into()) + } + } + } + + #[pallet::hooks] + impl Hooks> for Pallet { + fn integrity_test() { + assert!( + UnsignedWeightsOf::::submit_unsigned().all_lte(T::BlockWeights::get().max_block), + "weight of `submit_unsigned` is too high" + ) + } + + #[cfg(feature = "try-runtime")] + fn try_state(now: BlockNumberFor) -> Result<(), sp_runtime::TryRuntimeError> { + Self::do_try_state(now) + } + + fn offchain_worker(now: BlockNumberFor) { + use sp_runtime::offchain::storage_lock::{BlockAndTime, StorageLock}; + + // Create a lock with the maximum deadline of number of blocks in the unsigned phase. + // This should only come useful in an **abrupt** termination of execution, otherwise the + // guard will be dropped upon successful execution. + let mut lock = + StorageLock::>>::with_block_deadline( + miner::OffchainWorkerMiner::::OFFCHAIN_LOCK, + T::UnsignedPhase::get().saturated_into(), + ); + + match lock.try_lock() { + Ok(_guard) => { + Self::do_synchronized_offchain_worker(now); + }, + Err(deadline) => { + sublog!( + debug, + "unsigned", + "offchain worker lock not released, deadline is {:?}", + deadline + ); + }, + }; + } + } + + impl Pallet { + /// Internal logic of the offchain worker, to be executed only when the offchain lock is + /// acquired with success. + fn do_synchronized_offchain_worker(now: BlockNumberFor) { + use miner::OffchainWorkerMiner; + + let current_phase = crate::Pallet::::current_phase(); + sublog!( + trace, + "unsigned", + "lock for offchain worker acquired. 
Phase = {:?}", + current_phase + ); + if current_phase.is_unsigned_opened_now() { + // Mine a new solution, cache it, and attempt to submit it + let initial_output = + OffchainWorkerMiner::::ensure_offchain_repeat_frequency(now) + .and_then(|_| OffchainWorkerMiner::::mine_check_save_submit()); + sublog!(debug, "unsigned", "initial offchain worker output: {:?}", initial_output); + } else if current_phase.is_unsigned() { + // Try and resubmit the cached solution, and recompute ONLY if it is not + // feasible. + let resubmit_output = OffchainWorkerMiner::::ensure_offchain_repeat_frequency( + now, + ) + .and_then(|_| OffchainWorkerMiner::::restore_or_compute_then_maybe_submit()); + sublog!( + debug, + "unsigned", + "resubmit offchain worker output: {:?}", + resubmit_output + ); + }; + } + + /// The checks that should happen in the `ValidateUnsigned`'s `pre_dispatch` and + /// `validate_unsigned` functions. + /// + /// These check both for snapshot independent checks, and some checks that are specific to + /// the unsigned phase. + pub(crate) fn validate_unsigned_checks( + paged_solution: &PagedRawSolution, + ) -> Result<(), CommonError> { + Self::unsigned_specific_checks(paged_solution) + .and(crate::Pallet::::snapshot_independent_checks(paged_solution, None)) + .map_err(Into::into) + } + + /// The checks that are specific to the (this) unsigned pallet. + /// + /// ensure solution has the correct phase, and it has only 1 page. 
+ pub fn unsigned_specific_checks( + paged_solution: &PagedRawSolution, + ) -> Result<(), CommonError> { + ensure!( + crate::Pallet::::current_phase().is_unsigned(), + CommonError::EarlySubmission + ); + ensure!( + paged_solution.solution_pages.len() == T::MinerPages::get() as usize, + CommonError::WrongPageCount + ); + + Ok(()) + } + + #[cfg(any(test, feature = "runtime-benchmarks", feature = "try-runtime"))] + pub(crate) fn do_try_state( + _now: BlockNumberFor, + ) -> Result<(), sp_runtime::TryRuntimeError> { + Ok(()) + } + } +} + +#[cfg(test)] +mod validate_unsigned { + use frame_election_provider_support::Support; + use frame_support::{ + pallet_prelude::InvalidTransaction, + unsigned::{TransactionSource, TransactionValidityError, ValidateUnsigned}, + }; + + use super::Call; + use crate::{mock::*, types::*, verifier::Verifier}; + + #[test] + fn retracts_weak_score_accepts_threshold_better() { + ExtBuilder::unsigned() + .solution_improvement_threshold(sp_runtime::Perbill::from_percent(10)) + .build_and_execute(|| { + roll_to_snapshot_created(); + + let solution = mine_full_solution().unwrap(); + load_mock_signed_and_start(solution.clone()); + roll_to_full_verification(); + + // Some good solution is queued now. + assert_eq!( + ::queued_score(), + Some(ElectionScore { + minimal_stake: 55, + sum_stake: 130, + sum_stake_squared: 8650 + }) + ); + + roll_to_unsigned_open(); + + // this is just worse + let attempt = + fake_solution(ElectionScore { minimal_stake: 20, ..Default::default() }); + let call = Call::submit_unsigned { paged_solution: Box::new(attempt) }; + assert_eq!( + UnsignedPallet::validate_unsigned(TransactionSource::Local, &call).unwrap_err(), + TransactionValidityError::Invalid(InvalidTransaction::Custom(2)), + ); + + // this is better, but not enough better. 
+ let insufficient_improvement = 55 * 105 / 100; + let attempt = fake_solution(ElectionScore { + minimal_stake: insufficient_improvement, + ..Default::default() + }); + let call = Call::submit_unsigned { paged_solution: Box::new(attempt) }; + assert_eq!( + UnsignedPallet::validate_unsigned(TransactionSource::Local, &call).unwrap_err(), + TransactionValidityError::Invalid(InvalidTransaction::Custom(2)), + ); + + // note that we now have to use a solution with 2 winners, just to pass all of the + // snapshot independent checks. + let mut paged = raw_paged_from_supports( + vec![vec![ + (40, Support { total: 10, voters: vec![(3, 5)] }), + (30, Support { total: 10, voters: vec![(3, 5)] }), + ]], + 0, + ); + let sufficient_improvement = 55 * 115 / 100; + paged.score = + ElectionScore { minimal_stake: sufficient_improvement, ..Default::default() }; + let call = Call::submit_unsigned { paged_solution: Box::new(paged) }; + assert!(UnsignedPallet::validate_unsigned(TransactionSource::Local, &call).is_ok()); + }) + } + + #[test] + fn retracts_wrong_round() { + ExtBuilder::unsigned().build_and_execute(|| { + roll_to_unsigned_open(); + + let mut attempt = + fake_solution(ElectionScore { minimal_stake: 5, ..Default::default() }); + attempt.round += 1; + let call = Call::submit_unsigned { paged_solution: Box::new(attempt) }; + + assert_eq!( + UnsignedPallet::validate_unsigned(TransactionSource::Local, &call).unwrap_err(), + // WrongRound is index 1 + TransactionValidityError::Invalid(InvalidTransaction::Custom(1)), + ); + }) + } + + #[test] + fn retracts_too_many_pages_unsigned() { + ExtBuilder::unsigned().build_and_execute(|| { + // NOTE: unsigned solutions should have just 1 page, regardless of the configured + // page count. 
+ roll_to_unsigned_open(); + let attempt = mine_full_solution().unwrap(); + let call = Call::submit_unsigned { paged_solution: Box::new(attempt) }; + + assert_eq!( + UnsignedPallet::validate_unsigned(TransactionSource::Local, &call).unwrap_err(), + // WrongPageCount is index 3 + TransactionValidityError::Invalid(InvalidTransaction::Custom(3)), + ); + + let attempt = mine_solution(2).unwrap(); + let call = Call::submit_unsigned { paged_solution: Box::new(attempt) }; + + assert_eq!( + UnsignedPallet::validate_unsigned(TransactionSource::Local, &call).unwrap_err(), + TransactionValidityError::Invalid(InvalidTransaction::Custom(3)), + ); + + let attempt = mine_solution(1).unwrap(); + let call = Call::submit_unsigned { paged_solution: Box::new(attempt) }; + + assert!(UnsignedPallet::validate_unsigned(TransactionSource::Local, &call).is_ok(),); + }) + } + + #[test] + fn retracts_wrong_winner_count() { + ExtBuilder::unsigned().desired_targets(2).build_and_execute(|| { + roll_to_unsigned_open(); + + let paged = raw_paged_from_supports( + vec![vec![(40, Support { total: 10, voters: vec![(3, 10)] })]], + 0, + ); + + let call = Call::submit_unsigned { paged_solution: Box::new(paged) }; + + assert_eq!( + UnsignedPallet::validate_unsigned(TransactionSource::Local, &call).unwrap_err(), + // WrongWinnerCount is index 4 + TransactionValidityError::Invalid(InvalidTransaction::Custom(4)), + ); + }); + } + + #[test] + fn retracts_wrong_phase() { + ExtBuilder::unsigned().signed_phase(5, 0).build_and_execute(|| { + let solution = raw_paged_solution_low_score(); + let call = Call::submit_unsigned { paged_solution: Box::new(solution.clone()) }; + + // initial + assert_eq!(MultiBlock::current_phase(), Phase::Off); + assert!(matches!( + ::validate_unsigned( + TransactionSource::Local, + &call + ) + .unwrap_err(), + // because EarlySubmission is index 0. 
+ TransactionValidityError::Invalid(InvalidTransaction::Custom(0)) + )); + assert!(matches!( + ::pre_dispatch(&call).unwrap_err(), + TransactionValidityError::Invalid(InvalidTransaction::Custom(0)) + )); + + // signed + roll_to_signed_open(); + assert!(MultiBlock::current_phase().is_signed()); + assert!(matches!( + ::validate_unsigned( + TransactionSource::Local, + &call + ) + .unwrap_err(), + TransactionValidityError::Invalid(InvalidTransaction::Custom(0)) + )); + assert!(matches!( + ::pre_dispatch(&call).unwrap_err(), + TransactionValidityError::Invalid(InvalidTransaction::Custom(0)) + )); + + // unsigned + roll_to(25); + assert!(MultiBlock::current_phase().is_unsigned()); + + assert_ok!(::validate_unsigned( + TransactionSource::Local, + &call + )); + assert_ok!(::pre_dispatch(&call)); + }) + } + + #[test] + fn priority_is_set() { + ExtBuilder::unsigned() + .miner_tx_priority(20) + .desired_targets(0) + .build_and_execute(|| { + roll_to_unsigned_open(); + assert!(MultiBlock::current_phase().is_unsigned()); + + let solution = + fake_solution(ElectionScore { minimal_stake: 5, ..Default::default() }); + let call = Call::submit_unsigned { paged_solution: Box::new(solution.clone()) }; + + assert_eq!( + ::validate_unsigned( + TransactionSource::Local, + &call + ) + .unwrap() + .priority, + 25 + ); + }) + } +} + +#[cfg(test)] +mod call { + use crate::{mock::*, verifier::Verifier, Snapshot}; + + #[test] + fn unsigned_submission_e2e() { + let (mut ext, pool) = ExtBuilder::unsigned().build_offchainify(); + ext.execute_with_sanity_checks(|| { + roll_to_unsigned_open(); + + // snapshot is created.. + assert_full_snapshot(); + // ..txpool is empty.. + assert_eq!(pool.read().transactions.len(), 0); + // ..but nothing queued. + assert_eq!(::queued_score(), None); + + // now the OCW should submit something. + roll_next_with_ocw(Some(pool.clone())); + assert_eq!(pool.read().transactions.len(), 1); + assert_eq!(::queued_score(), None); + + // and now it should be applied. 
+ roll_next_with_ocw(Some(pool.clone())); + assert_eq!(pool.read().transactions.len(), 0); + assert!(matches!(::queued_score(), Some(_))); + }) + } + + #[test] + #[should_panic( + expected = "Invalid unsigned submission must produce invalid block and deprive validator from their authoring reward." + )] + fn unfeasible_solution_panics() { + let (mut ext, pool) = ExtBuilder::unsigned().build_offchainify(); + ext.execute_with_sanity_checks(|| { + roll_to_unsigned_open(); + + // snapshot is created.. + assert_full_snapshot(); + // ..txpool is empty.. + assert_eq!(pool.read().transactions.len(), 0); + // ..but nothing queued. + assert_eq!(::queued_score(), None); + + // now the OCW should submit something. + roll_next_with_ocw(Some(pool.clone())); + assert_eq!(pool.read().transactions.len(), 1); + assert_eq!(::queued_score(), None); + + // now we change the snapshot -- this should ensure that the solution becomes invalid. + // Note that we don't change the known fingerprint of the solution. + Snapshot::::remove_target(2); + + // and now it should be applied. + roll_next_with_ocw(Some(pool.clone())); + assert_eq!(pool.read().transactions.len(), 0); + assert!(matches!(::queued_score(), Some(_))); + }) + } +} diff --git a/substrate/frame/election-provider-multi-block/src/verifier/benchmarking.rs b/substrate/frame/election-provider-multi-block/src/verifier/benchmarking.rs new file mode 100644 index 0000000000000..c41af1dc13cef --- /dev/null +++ b/substrate/frame/election-provider-multi-block/src/verifier/benchmarking.rs @@ -0,0 +1,236 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{ + verifier::{Config, Event, FeasibilityError, Pallet, Status, StatusStorage}, + CurrentPhase, Phase, +}; +use frame_benchmarking::v2::*; +use frame_election_provider_support::{ElectionProvider, NposSolution}; +use frame_support::pallet_prelude::*; +use sp_std::prelude::*; + +#[benchmarks(where + T: crate::Config + crate::signed::Config + crate::unsigned::Config, + ::RuntimeEvent: TryInto> +)] +mod benchmarks { + use super::*; + + fn events_for() -> Vec> + where + ::RuntimeEvent: TryInto>, + { + frame_system::Pallet::::read_events_for_pallet::>() + } + + #[benchmark(pov_mode = Measured)] + fn on_initialize_valid_non_terminal() -> Result<(), BenchmarkError> { + #[cfg(test)] + crate::mock::ElectionStart::set(sp_runtime::traits::Bounded::max_value()); + crate::Pallet::::start().unwrap(); + + // roll to signed validation, with a solution stored in the signed pallet + + crate::Pallet::::roll_to_signed_and_submit_full_solution()?; + // roll to verification + crate::Pallet::::roll_until_matches(|| { + matches!(CurrentPhase::::get(), Phase::SignedValidation(_)) + }); + + // start signal must have been sent by now + assert_eq!(StatusStorage::::get(), Status::Ongoing(crate::Pallet::::msp())); + + #[block] + { + crate::Pallet::::roll_next(true, false); + } + assert_eq!(StatusStorage::::get(), Status::Ongoing(crate::Pallet::::msp() - 1)); + + Ok(()) + } + + #[benchmark(pov_mode = Measured)] + fn on_initialize_valid_terminal() -> Result<(), BenchmarkError> { + #[cfg(test)] + 
crate::mock::ElectionStart::set(sp_runtime::traits::Bounded::max_value()); + crate::Pallet::::start().unwrap(); + + // roll to signed validation, with a solution stored in the signed pallet + assert!( + T::SignedValidationPhase::get() >= T::Pages::get().into(), + "Signed validation phase must be larger than the number of pages" + ); + + crate::Pallet::::roll_to_signed_and_submit_full_solution()?; + // roll to before the last page of verification + crate::Pallet::::roll_until_matches(|| { + matches!(CurrentPhase::::get(), Phase::SignedValidation(_)) + }); + // start signal must have been sent by now + assert_eq!(StatusStorage::::get(), Status::Ongoing(crate::Pallet::::msp())); + for _ in 0..(T::Pages::get() - 1) { + crate::Pallet::::roll_next(true, false); + } + + // we must have verified all pages by now, minus the last one. + assert!(matches!( + &events_for::()[..], + [Event::Verified(_, _), .., Event::Verified(1, _)] + )); + + // verify the last page. + #[block] + { + crate::Pallet::::roll_next(true, false); + } + + // we are done + assert_eq!(StatusStorage::::get(), Status::Nothing); + // last event is success + assert!(matches!( + &events_for::()[..], + [Event::Verified(_, _), .., Event::Verified(0, _), Event::Queued(_, None)] + )); + + Ok(()) + } + + #[benchmark(pov_mode = Measured)] + fn on_initialize_invalid_terminal() -> Result<(), BenchmarkError> { + // this is the verification of the current page + removing all of the previously valid + // pages. The worst case is therefore when the last page is invalid, for example the final + // score. 
+ assert!(T::Pages::get() >= 2, "benchmark only works if we have more than 2 pages"); + + #[cfg(test)] + crate::mock::ElectionStart::set(sp_runtime::traits::Bounded::max_value()); + crate::Pallet::::start().unwrap(); + + // roll to signed validation, with a solution stored in the signed pallet + + // but this solution is corrupt + let mut paged_solution = crate::Pallet::::roll_to_signed_and_mine_full_solution(); + paged_solution.score.minimal_stake -= 1; + crate::Pallet::::submit_full_solution(paged_solution)?; + + // roll to verification + crate::Pallet::::roll_until_matches(|| { + matches!(CurrentPhase::::get(), Phase::SignedValidation(_)) + }); + + assert_eq!(StatusStorage::::get(), Status::Ongoing(crate::Pallet::::msp())); + // verify all pages, except for the last one. + for i in 0..T::Pages::get() - 1 { + crate::Pallet::::roll_next(true, false); + assert_eq!( + StatusStorage::::get(), + Status::Ongoing(crate::Pallet::::msp() - 1 - i) + ); + } + + // next page to be verified is the last one + assert_eq!(StatusStorage::::get(), Status::Ongoing(crate::Pallet::::lsp())); + assert!(matches!( + &events_for::()[..], + [Event::Verified(_, _), .., Event::Verified(1, _)] + )); + + #[block] + { + crate::Pallet::::roll_next(true, false); + } + + // we are now reset. + assert_eq!(StatusStorage::::get(), Status::Nothing); + assert!(matches!( + &events_for::()[..], + [ + .., + Event::Verified(0, _), + Event::VerificationFailed(0, FeasibilityError::InvalidScore) + ] + )); + + Ok(()) + } + + #[benchmark(pov_mode = Measured)] + fn on_initialize_invalid_non_terminal( + // number of valid pages that have been verified, before we verify the non-terminal invalid + // page. 
+ v: Linear<0, { T::Pages::get() - 1 }>, + ) -> Result<(), BenchmarkError> { + assert!(T::Pages::get() >= 2, "benchmark only works if we have more than 2 pages"); + + #[cfg(test)] + crate::mock::ElectionStart::set(sp_runtime::traits::Bounded::max_value()); + crate::Pallet::::start().unwrap(); + + // roll to signed validation, with a solution stored in the signed pallet, but this solution + // is corrupt in its msp. + let mut paged_solution = crate::Pallet::::roll_to_signed_and_mine_full_solution(); + let page_to_corrupt = crate::Pallet::::msp() - v; + crate::log!( + info, + "pages of solution: {:?}, to corrupt {}, v {}", + paged_solution.solution_pages.len(), + page_to_corrupt, + v + ); + paged_solution.solution_pages[page_to_corrupt as usize].corrupt(); + crate::Pallet::::submit_full_solution(paged_solution)?; + + // roll to verification + crate::Pallet::::roll_until_matches(|| { + matches!(CurrentPhase::::get(), Phase::SignedValidation(_)) + }); + + // we should be ready to go + assert_eq!(StatusStorage::::get(), Status::Ongoing(crate::Pallet::::msp())); + + // validate the parameterized number of valid pages. + for _ in 0..v { + crate::Pallet::::roll_next(true, false); + } + + // we are still ready to continue + assert_eq!(StatusStorage::::get(), Status::Ongoing(crate::Pallet::::msp() - v)); + + // verify one page, which will be invalid. + #[block] + { + crate::Pallet::::roll_next(true, false); + } + + // we are now reset, because this page was invalid.
+ assert_eq!(StatusStorage::::get(), Status::Nothing); + + assert!(matches!( + &events_for::()[..], + [.., Event::VerificationFailed(_, FeasibilityError::NposElection(_))] + )); + + Ok(()) + } + + impl_benchmark_test_suite!( + Pallet, + crate::mock::ExtBuilder::full().build_unchecked(), + crate::mock::Runtime + ); +} diff --git a/substrate/frame/election-provider-multi-block/src/verifier/impls.rs b/substrate/frame/election-provider-multi-block/src/verifier/impls.rs new file mode 100644 index 0000000000000..0236fe58fa1a2 --- /dev/null +++ b/substrate/frame/election-provider-multi-block/src/verifier/impls.rs @@ -0,0 +1,1017 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The implementation of the verifier pallet, and an implementation of [`crate::Verifier`] and +//! [`crate::AsynchronousVerifier`] for [`Pallet`]. 
+ +use super::*; +use crate::{ + helpers, + types::VoterOf, + unsigned::miner::{MinerConfig, PageSupportsOfMiner}, + verifier::Verifier, + SolutionOf, +}; +use codec::{Decode, Encode, MaxEncodedLen}; +use frame_election_provider_support::{ + ExtendedBalance, NposSolution, PageIndex, TryFromOtherBounds, +}; +use frame_support::{ + ensure, + pallet_prelude::{ValueQuery, *}, + traits::{defensive_prelude::*, Defensive, Get}, +}; +use frame_system::pallet_prelude::*; +use pallet::*; +use sp_npos_elections::{evaluate_support, ElectionScore}; +use sp_runtime::Perbill; +use sp_std::{collections::btree_map::BTreeMap, prelude::*}; + +pub(crate) type SupportsOfVerifier = frame_election_provider_support::BoundedSupports< + ::AccountId, + ::MaxWinnersPerPage, + ::MaxBackersPerWinner, +>; + +pub(crate) type VerifierWeightsOf = ::WeightInfo; + +/// The status of this pallet. +#[derive( + Encode, Decode, scale_info::TypeInfo, Clone, Copy, MaxEncodedLen, Debug, PartialEq, Eq, +)] +pub enum Status { + /// A verification is ongoing, and the next page that will be verified is indicated with the + /// inner value. + Ongoing(PageIndex), + /// Nothing is happening. + Nothing, +} + +impl Default for Status { + fn default() -> Self { + Self::Nothing + } +} + +/// Enum to point to the valid variant of the [`QueuedSolution`]. +#[derive(Encode, Decode, scale_info::TypeInfo, Clone, Copy, MaxEncodedLen)] +enum ValidSolution { + X, + Y, +} + +impl Default for ValidSolution { + fn default() -> Self { + ValidSolution::Y + } +} + +impl ValidSolution { + fn other(&self) -> Self { + match *self { + ValidSolution::X => ValidSolution::Y, + ValidSolution::Y => ValidSolution::X, + } + } +} + +/// A simple newtype that represents the partial backing of a winner. It only stores the total +/// backing, and the sum of backings, as opposed to a [`sp_npos_elections::Support`] that also +/// stores all of the backers' individual contribution. 
+/// +/// This is mainly here to allow us to implement `Backings` for it. +#[derive(Default, Encode, Decode, MaxEncodedLen, scale_info::TypeInfo)] +pub struct PartialBackings { + /// The total backing of this particular winner. + pub total: ExtendedBalance, + /// The number of backers. + pub backers: u32, +} + +impl sp_npos_elections::Backings for PartialBackings { + fn total(&self) -> ExtendedBalance { + self.total + } +} + +#[frame_support::pallet] +pub(crate) mod pallet { + use super::*; + + #[pallet::config] + #[pallet::disable_frame_system_supertrait_check] + pub trait Config: crate::Config { + /// The minimum amount of improvement to the solution score that defines a solution as + /// "better". + #[pallet::constant] + type SolutionImprovementThreshold: Get; + + /// Maximum number of backers, per winner, among all pages of an election. + /// + /// This can only be checked at the very final step of verification. + /// + /// NOTE: at the moment, we don't check this, and it is in place for future compatibility. + #[pallet::constant] + type MaxBackersPerWinnerFinal: Get; + + /// Maximum number of backers, per winner, per page. + #[pallet::constant] + type MaxBackersPerWinner: Get; + + /// Maximum number of supports (aka. winners/validators/targets) that can be represented in + /// a page of results. + #[pallet::constant] + type MaxWinnersPerPage: Get; + + /// Something that can provide the solution data to the verifier. + /// + /// In reality, this will be fulfilled by the signed phase. + type SolutionDataProvider: crate::verifier::SolutionDataProvider< + Solution = SolutionOf, + >; + + /// The weight information of this pallet. + type WeightInfo: super::WeightInfo; + } + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// The verification data was unavailable and it could not continue. + VerificationDataUnavailable, + /// A verification failed at the given page. 
+ /// + /// NOTE: if the index is 0, then this could mean either the feasibility of the last page + /// was wrong, or the final checks of `finalize_verification` failed. + VerificationFailed(PageIndex, FeasibilityError), + /// The given page of a solution has been verified, with the given number of winners being + /// found in it. + Verified(PageIndex, u32), + /// A solution with the given score has replaced our current best solution. + Queued(ElectionScore, Option), + } + + /// A wrapper interface for the storage items related to the queued solution. + /// + /// It wraps the following: + /// + /// - `QueuedSolutionX` + /// - `QueuedSolutionY` + /// - `QueuedValidVariant` + /// - `QueuedSolutionScore` + /// - `QueuedSolutionBackings` + /// + /// As the name suggests, `QueuedValidVariant` points to the correct variant between + /// `QueuedSolutionX` and `QueuedSolutionY`. In the context of this pallet, by VALID and + /// INVALID variant we mean either of these two storage items, based on the value of + /// `QueuedValidVariant`. + /// + /// ### Invariants + /// + /// The following conditions must be met at all times for this group of storage items to be + /// sane. + /// + /// - `QueuedSolutionScore` must always be correct. In other words, it should correctly be the + /// score of `QueuedValidVariant`. + /// - `QueuedSolutionScore` must always be [`Config::SolutionImprovementThreshold`] better than + /// `MinimumScore`. + /// - The number of existing keys in `QueuedSolutionBackings` must always match that of the + /// INVALID variant. + /// + /// Moreover, the following conditions must be met when this pallet is in [`Status::Nothing`], + /// meaning that no ongoing asynchronous verification is ongoing. + /// + /// - No keys should exist in the INVALID variant. + /// - This implies that no data should exist in `QueuedSolutionBackings`. 
+ /// + /// > Note that some keys *might* exist in the queued variant, but since partial solutions + /// > (having less than `T::Pages` pages) are in principle correct, we cannot assert anything on + /// > the number of keys in the VALID variant. In fact, an empty solution with score of [0, 0, + /// > 0] can also be correct. + /// + /// No additional conditions must be met when the pallet is in [`Status::Ongoing`]. The number + /// of pages in + pub struct QueuedSolution(sp_std::marker::PhantomData); + impl QueuedSolution { + /// Private helper for mutating the storage group. + fn mutate_checked(mutate: impl FnOnce() -> R) -> R { + let r = mutate(); + #[cfg(debug_assertions)] + assert!(Self::sanity_check().is_ok()); + r + } + + /// Finalize a correct solution. + /// + /// Should be called at the end of a verification process, once we are sure that a certain + /// solution is 100% correct. + /// + /// It stores its score, flips the pointer to it being the current best one, and clears all + /// the backings and the invalid variant. (note: in principle, we can skip clearing the + /// backings here) + pub(crate) fn finalize_correct(score: ElectionScore) { + sublog!( + info, + "verifier", + "finalizing verification a correct solution, replacing old score {:?} with {:?}", + QueuedSolutionScore::::get(), + score + ); + + Self::mutate_checked(|| { + QueuedValidVariant::::mutate(|v| *v = v.other()); + QueuedSolutionScore::::put(score); + + // Clear what was previously the valid variant. Also clears the partial backings. + Self::clear_invalid_and_backings_unchecked(); + }); + } + + /// Clear all relevant information of an invalid solution. + /// + /// Should be called at any step, if we encounter an issue which makes the solution + /// infeasible. 
+ pub(crate) fn clear_invalid_and_backings() { + Self::mutate_checked(Self::clear_invalid_and_backings_unchecked) + } + + /// Same as [`clear_invalid_and_backings`], but without any checks for the integrity of the + /// storage item group. + pub(crate) fn clear_invalid_and_backings_unchecked() { + // clear is safe as we delete at most `Pages` entries, and `Pages` is bounded. + match Self::invalid() { + ValidSolution::X => clear_paged_map!(QueuedSolutionX::), + ValidSolution::Y => clear_paged_map!(QueuedSolutionY::), + }; + clear_paged_map!(QueuedSolutionBackings::); + } + + /// Write a single page of a valid solution into the `invalid` variant of the storage. + /// + /// This should only be called once we are sure that this particular page is 100% correct. + /// + /// This is called after *a page* has been validated, but the entire solution is not yet + /// known to be valid. At this stage, we write to the invalid variant. Once all pages are + /// verified, a call to [`finalize_correct`] will seal the correct pages and flip the + /// invalid/valid variants. + pub(crate) fn set_invalid_page(page: PageIndex, supports: SupportsOfVerifier>) { + use frame_support::traits::TryCollect; + Self::mutate_checked(|| { + let backings: BoundedVec<_, _> = supports + .iter() + .map(|(x, s)| (x.clone(), PartialBackings { total: s.total, backers: s.voters.len() as u32 } )) + .try_collect() + .expect("`SupportsOfVerifier` is bounded by as Verifier>::MaxWinnersPerPage, which is assured to be the same as `T::MaxWinnersPerPage` in an integrity test"); + QueuedSolutionBackings::::insert(page, backings); + + match Self::invalid() { + ValidSolution::X => QueuedSolutionX::::insert(page, supports), + ValidSolution::Y => QueuedSolutionY::::insert(page, supports), + } + }) + } + + /// Write a single page to the valid variant directly. + /// + /// This is not the normal flow of writing, and the solution is not checked. 
+ /// + /// This is only useful to override the valid solution with a single (likely backup) + /// solution. + pub(crate) fn force_set_single_page_valid( + page: PageIndex, + supports: SupportsOfVerifier>, + score: ElectionScore, + ) { + Self::mutate_checked(|| { + // clear everything about valid solutions. + match Self::valid() { + ValidSolution::X => clear_paged_map!(QueuedSolutionX::), + ValidSolution::Y => clear_paged_map!(QueuedSolutionY::), + }; + QueuedSolutionScore::::kill(); + + // write a single new page. + match Self::valid() { + ValidSolution::X => QueuedSolutionX::::insert(page, supports), + ValidSolution::Y => QueuedSolutionY::::insert(page, supports), + } + + // write the score. + QueuedSolutionScore::::put(score); + }) + } + + pub(crate) fn force_set_multi_page_valid( + pages: Vec, + supports: Vec>>, + score: ElectionScore, + ) { + debug_assert_eq!(pages.len(), supports.len()); + // queue it in our valid queue + Self::mutate_checked(|| { + // clear everything about valid solutions. + match Self::valid() { + ValidSolution::X => clear_paged_map!(QueuedSolutionX::), + ValidSolution::Y => clear_paged_map!(QueuedSolutionY::), + }; + QueuedSolutionScore::::kill(); + + // store the valid pages + for (support, page) in supports.into_iter().zip(pages.iter()) { + match Self::valid() { + ValidSolution::X => QueuedSolutionX::::insert(page, support), + ValidSolution::Y => QueuedSolutionY::::insert(page, support), + } + } + QueuedSolutionScore::::put(score); + }); + } + + /// Clear all storage items. + /// + /// Should only be called once everything is done. + pub(crate) fn kill() { + Self::mutate_checked(|| { + clear_paged_map!(QueuedSolutionX::); + clear_paged_map!(QueuedSolutionY::); + QueuedValidVariant::::kill(); + clear_paged_map!(QueuedSolutionBackings::); + QueuedSolutionScore::::kill(); + }) + } + + // -- non-mutating methods. + + /// Return the `score` and `winner_count` of verifying solution. 
+ /// + /// Assumes that all the corresponding pages of `QueuedSolutionBackings` exist, then it + /// computes the final score of the solution that is currently at the end of its + /// verification process. + /// + /// This solution corresponds to whatever is stored in the INVALID variant of + /// `QueuedSolution`. Recall that the score of this solution is not yet verified, so it + /// should never become `valid`. + pub(crate) fn compute_invalid_score() -> Result<(ElectionScore, u32), FeasibilityError> { + // ensure that this is only called when all pages are verified individually. + if QueuedSolutionBackings::::iter_keys().count() != T::Pages::get() as usize { + return Err(FeasibilityError::Incomplete) + } + + let mut total_supports: BTreeMap = Default::default(); + for (who, PartialBackings { backers, total }) in + QueuedSolutionBackings::::iter().flat_map(|(_, pb)| pb) + { + let entry = total_supports.entry(who).or_default(); + entry.total = entry.total.saturating_add(total); + entry.backers = entry.backers.saturating_add(backers); + + if entry.backers > T::MaxBackersPerWinnerFinal::get() { + return Err(FeasibilityError::FailedToBoundSupport) + } + } + + let winner_count = total_supports.len() as u32; + let score = evaluate_support(total_supports.into_values()); + + Ok((score, winner_count)) + } + + /// The score of the current best solution, if any. + pub(crate) fn queued_score() -> Option { + QueuedSolutionScore::::get() + } + + /// Get a page of the current queued (aka valid) solution. 
+ pub(crate) fn get_queued_solution_page( + page: PageIndex, + ) -> Option>> { + match Self::valid() { + ValidSolution::X => QueuedSolutionX::::get(page), + ValidSolution::Y => QueuedSolutionY::::get(page), + } + } + + fn valid() -> ValidSolution { + QueuedValidVariant::::get() + } + + fn invalid() -> ValidSolution { + Self::valid().other() + } + } + + #[allow(unused)] + #[cfg(any(test, feature = "runtime-benchmarks", feature = "try-runtime", debug_assertions))] + impl QueuedSolution { + pub(crate) fn valid_iter( + ) -> impl Iterator>)> { + match Self::valid() { + ValidSolution::X => QueuedSolutionX::::iter(), + ValidSolution::Y => QueuedSolutionY::::iter(), + } + } + + pub(crate) fn invalid_iter( + ) -> impl Iterator>)> { + match Self::invalid() { + ValidSolution::X => QueuedSolutionX::::iter(), + ValidSolution::Y => QueuedSolutionY::::iter(), + } + } + + pub(crate) fn get_valid_page(page: PageIndex) -> Option>> { + match Self::valid() { + ValidSolution::X => QueuedSolutionX::::get(page), + ValidSolution::Y => QueuedSolutionY::::get(page), + } + } + + pub(crate) fn backing_iter() -> impl Iterator< + Item = (PageIndex, BoundedVec<(T::AccountId, PartialBackings), T::MaxWinnersPerPage>), + > { + QueuedSolutionBackings::::iter() + } + + /// Ensure that all the storage items managed by this struct are in `kill` state, meaning + /// that in the expect state after an election is OVER. + pub(crate) fn assert_killed() { + use frame_support::assert_storage_noop; + assert_storage_noop!(Self::kill()); + } + + /// Ensure this storage item group is in correct state. + pub(crate) fn sanity_check() -> Result<(), sp_runtime::DispatchError> { + // score is correct and better than min-score. 
+ ensure!(
+ Pallet::::minimum_score()
+ .zip(Self::queued_score())
+ .map_or(true, |(min_score, score)| score
+ .strict_threshold_better(min_score, Perbill::zero())),
+ "queued solution has weak score (min-score)"
+ );
+
+ if let Some(queued_score) = Self::queued_score() {
+ let mut backing_map: BTreeMap = BTreeMap::new();
+ Self::valid_iter()
+ .flat_map(|(_, supports)| supports)
+ .for_each(|(who, support)| {
+ let entry = backing_map.entry(who).or_default();
+ entry.total = entry.total.saturating_add(support.total);
+ });
+ let real_score = evaluate_support(backing_map.into_values());
+ ensure!(real_score == queued_score, "queued solution has wrong score");
+ } else {
+ assert!(Self::valid_iter().count() == 0, "nothing should be stored if no score");
+ }
+
+ // The number of existing keys in `QueuedSolutionBackings` must always match that of
+ // the INVALID variant.
+ ensure!(
+ QueuedSolutionBackings::::iter().count() == Self::invalid_iter().count(),
+ "incorrect number of backings pages",
+ );
+
+ if let Status::Nothing = StatusStorage::::get() {
+ ensure!(Self::invalid_iter().count() == 0, "dangling data in invalid variant");
+ }
+
+ Ok(())
+ }
+ }
+
+ // -- private storage items, managed by `QueuedSolution`.
+
+ /// The `X` variant of the current queued solution. Might be the valid one or not.
+ ///
+ /// The two variants of this storage item are to avoid the need of copying. Recall that once a
+ /// `VerifyingSolution` is being processed, it needs to write its partial supports *somewhere*.
+ /// Writing these supports on top of a *good* queued supports is wrong, since we might bail.
+ /// Writing them to a buffer and copying at the end is slightly better, but expensive. This flag
+ /// system is best of both worlds.
+ #[pallet::storage]
+ type QueuedSolutionX =
+ StorageMap<_, Twox64Concat, PageIndex, SupportsOfVerifier>>;
+
+ /// The `Y` variant of the current queued solution. Might be the valid one or not. 
+ #[pallet::storage] + type QueuedSolutionY = + StorageMap<_, Twox64Concat, PageIndex, SupportsOfVerifier>>; + /// Pointer to the variant of [`QueuedSolutionX`] or [`QueuedSolutionY`] that is currently + /// valid. + + #[pallet::storage] + type QueuedValidVariant = StorageValue<_, ValidSolution, ValueQuery>; + + /// The `(amount, count)` of backings, divided per page. + /// + /// This is stored because in the last block of verification we need them to compute the score, + /// and check `MaxBackersPerWinnerFinal`. + /// + /// This can only ever live for the invalid variant of the solution. Once it is valid, we don't + /// need this information anymore; the score is already computed once in + /// [`QueuedSolutionScore`], and the backing counts are checked. + #[pallet::storage] + type QueuedSolutionBackings = StorageMap< + _, + Twox64Concat, + PageIndex, + BoundedVec<(T::AccountId, PartialBackings), T::MaxWinnersPerPage>, + >; + + /// The score of the valid variant of [`QueuedSolution`]. + /// + /// This only ever lives for the `valid` variant. + #[pallet::storage] + type QueuedSolutionScore = StorageValue<_, ElectionScore>; + + // -- ^^ private storage items, managed by `QueuedSolution`. + + /// The minimum score that each solution must attain in order to be considered feasible. + #[pallet::storage] + #[pallet::getter(fn minimum_score)] + pub(crate) type MinimumScore = StorageValue<_, ElectionScore>; + + /// Storage item for [`Status`]. + #[pallet::storage] + #[pallet::getter(fn status_storage)] + pub(crate) type StatusStorage = StorageValue<_, Status, ValueQuery>; + + #[pallet::pallet] + pub struct Pallet(PhantomData); + + #[pallet::call] + impl Pallet {} + + #[pallet::hooks] + impl Hooks> for Pallet { + fn integrity_test() { + // ensure that we have funneled some of our type parameters EXACTLY as-is to the + // verifier trait interface we implement. 
+ assert_eq!(T::MaxWinnersPerPage::get(), ::MaxWinnersPerPage::get()); + assert_eq!( + T::MaxBackersPerWinner::get(), + ::MaxBackersPerWinner::get() + ); + assert!(T::MaxBackersPerWinner::get() <= T::MaxBackersPerWinnerFinal::get()); + } + + fn on_initialize(_n: BlockNumberFor) -> Weight { + Self::do_on_initialize() + } + + #[cfg(feature = "try-runtime")] + fn try_state(_now: BlockNumberFor) -> Result<(), sp_runtime::TryRuntimeError> { + Self::do_try_state(_now) + } + } +} + +impl Pallet { + fn do_on_initialize() -> Weight { + if let Status::Ongoing(current_page) = Self::status_storage() { + let maybe_page_solution = + ::get_page(current_page); + + if maybe_page_solution.as_ref().is_none() { + // the data provider has zilch, revert to a clean state, waiting for a new `start`. + sublog!( + error, + "verifier", + "T::SolutionDataProvider failed to deliver page {}. This is an unexpected error.", + current_page, + ); + + QueuedSolution::::clear_invalid_and_backings(); + StatusStorage::::put(Status::Nothing); + T::SolutionDataProvider::report_result(VerificationResult::DataUnavailable); + + Self::deposit_event(Event::::VerificationDataUnavailable); + // weight is a bit overestimate. + let wasted_pages = T::Pages::get().saturating_sub(current_page); + return VerifierWeightsOf::::on_initialize_invalid_non_terminal(wasted_pages); + } + + let page_solution = maybe_page_solution.expect("Option checked to not be None; qed"); + let maybe_supports = Self::feasibility_check_page_inner(page_solution, current_page); + + sublog!( + debug, + "verifier", + "verified page {} of a solution, outcome = {:?}", + current_page, + maybe_supports.as_ref().map(|s| s.len()) + ); + + match maybe_supports { + Ok(supports) => { + Self::deposit_event(Event::::Verified(current_page, supports.len() as u32)); + QueuedSolution::::set_invalid_page(current_page, supports); + + if current_page > crate::Pallet::::lsp() { + // not last page, just tick forward. 
+ StatusStorage::::put(Status::Ongoing(current_page.saturating_sub(1))); + VerifierWeightsOf::::on_initialize_valid_non_terminal() + } else { + // last page, finalize everything. Solution data provider must always have a + // score for us at this point. Not much point in reporting a result, we just + // assume default score, which will almost certainly fail and cause a proper + // cleanup of the pallet, which is what we want anyways. + let claimed_score = + T::SolutionDataProvider::get_score().defensive_unwrap_or_default(); + + // in both cases of the following match, we are not back to the nothing + // state. + StatusStorage::::put(Status::Nothing); + + match Self::finalize_async_verification(claimed_score) { + Ok(_) => { + T::SolutionDataProvider::report_result(VerificationResult::Queued); + VerifierWeightsOf::::on_initialize_valid_terminal() + }, + Err(_) => { + T::SolutionDataProvider::report_result( + VerificationResult::Rejected, + ); + // In case of any of the errors, kill the solution. + QueuedSolution::::clear_invalid_and_backings(); + VerifierWeightsOf::::on_initialize_invalid_terminal() + }, + } + } + }, + Err(err) => { + // the page solution was invalid. + Self::deposit_event(Event::::VerificationFailed(current_page, err)); + StatusStorage::::put(Status::Nothing); + QueuedSolution::::clear_invalid_and_backings(); + T::SolutionDataProvider::report_result(VerificationResult::Rejected); + let wasted_pages = T::Pages::get().saturating_sub(current_page); + VerifierWeightsOf::::on_initialize_invalid_non_terminal(wasted_pages) + }, + } + } else { + T::DbWeight::get().reads(1) + } + } + + fn do_verify_synchronous_multi( + partial_solutions: Vec>, + solution_pages: Vec, + claimed_score: ElectionScore, + ) -> Result<(), (PageIndex, FeasibilityError)> { + let first_page = solution_pages.first().cloned().unwrap_or_default(); + let last_page = solution_pages.last().cloned().unwrap_or_default(); + // first, ensure this score will be good enough, even if valid.. 
+ let _ = Self::ensure_score_quality(claimed_score).map_err(|fe| (first_page, fe))?; + ensure!( + partial_solutions.len() == solution_pages.len(), + (first_page, FeasibilityError::Incomplete) + ); + + // verify each page, and amalgamate into a final support. + let mut backings = + sp_std::collections::btree_map::BTreeMap::::new(); + let mut linked_supports = Vec::with_capacity(partial_solutions.len()); + + for (solution_page, page) in partial_solutions.into_iter().zip(solution_pages.iter()) { + let page_supports = Self::feasibility_check_page_inner(solution_page, *page) + .map_err(|fe| (*page, fe))?; + + linked_supports.push(page_supports.clone()); + let support_len = page_supports.len() as u32; + for (who, support) in page_supports.into_iter() { + let entry = backings.entry(who).or_default(); + entry.total = entry.total.saturating_add(support.total); + // Note we assume snapshots are always disjoint, and therefore we can easily extend + // here. + entry.backers = entry.backers.saturating_add(support.voters.len() as u32); + if entry.backers > T::MaxBackersPerWinnerFinal::get() { + return Err((*page, FeasibilityError::FailedToBoundSupport)) + } + } + + Self::deposit_event(Event::::Verified(*page, support_len)); + } + + // then check that the number of winners was exactly enough.. + let desired_targets = crate::Snapshot::::desired_targets() + .ok_or(FeasibilityError::SnapshotUnavailable) + .map_err(|fe| (last_page, fe))?; + ensure!( + backings.len() as u32 == desired_targets, + (last_page, FeasibilityError::WrongWinnerCount) + ); + + // then check the score was truth.. + let truth_score = evaluate_support(backings.into_values()); + ensure!(truth_score == claimed_score, (last_page, FeasibilityError::InvalidScore)); + + let maybe_current_score = QueuedSolution::::queued_score(); + + // then store it. 
+ sublog!( + info, + "verifier", + "queued sync solution with score {:?} for pages {:?}", + truth_score, + solution_pages + ); + QueuedSolution::::force_set_multi_page_valid( + solution_pages, + linked_supports, + truth_score, + ); + Self::deposit_event(Event::::Queued(truth_score, maybe_current_score)); + + Ok(()) + } + + /// Finalize an asynchronous verification. Checks the final score for correctness, and ensures + /// that it matches all of the criteria. + /// + /// This should only be called when all pages of an async verification are done. + /// + /// Returns: + /// - `Ok()` if everything is okay, at which point the valid variant of the queued solution will + /// be updated. Returns + /// - `Err(Feasibility)` if any of the last verification steps fail. + fn finalize_async_verification(claimed_score: ElectionScore) -> Result<(), FeasibilityError> { + let outcome = QueuedSolution::::compute_invalid_score() + .and_then(|(final_score, winner_count)| { + let desired_targets = crate::Snapshot::::desired_targets().unwrap(); + // claimed_score checked prior in seal_unverified_solution + match (final_score == claimed_score, winner_count == desired_targets) { + (true, true) => { + // all good, finalize this solution + // NOTE: must be before the call to `finalize_correct`. + Self::deposit_event(Event::::Queued( + final_score, + QueuedSolution::::queued_score(), /* the previous score, now + * ejected. */ + )); + QueuedSolution::::finalize_correct(final_score); + Ok(()) + }, + (false, true) => Err(FeasibilityError::InvalidScore), + (true, false) => Err(FeasibilityError::WrongWinnerCount), + (false, false) => Err(FeasibilityError::InvalidScore), + } + }) + .map_err(|err| { + sublog!(warn, "verifier", "Finalizing solution was invalid due to {:?}.", err); + // and deposit an event about it. 
+ Self::deposit_event(Event::::VerificationFailed(0, err.clone())); + err + }); + sublog!(debug, "verifier", "finalize verification outcome: {:?}", outcome); + outcome + } + + /// Ensure that the given score is: + /// + /// - better than the queued solution, if one exists. + /// - greater than the minimum untrusted score. + pub(crate) fn ensure_score_quality(score: ElectionScore) -> Result<(), FeasibilityError> { + let is_improvement = ::queued_score().map_or(true, |best_score| { + score.strict_threshold_better(best_score, T::SolutionImprovementThreshold::get()) + }); + ensure!(is_improvement, FeasibilityError::ScoreTooLow); + + let is_greater_than_min_untrusted = Self::minimum_score() + .map_or(true, |min_score| score.strict_threshold_better(min_score, Perbill::zero())); + ensure!(is_greater_than_min_untrusted, FeasibilityError::ScoreTooLow); + + Ok(()) + } + + /// Do the full feasibility check: + /// + /// - check all edges. + /// - checks `MaxBackersPerWinner` to be respected IN THIS PAGE. + /// - checks the number of winners to be less than or equal to `DesiredTargets` IN THIS PAGE + /// ONLY. + pub(super) fn feasibility_check_page_inner( + partial_solution: SolutionOf, + page: PageIndex, + ) -> Result, FeasibilityError> { + // Read the corresponding snapshots. 
+ let snapshot_targets = + crate::Snapshot::::targets().ok_or(FeasibilityError::SnapshotUnavailable)?; + let snapshot_voters = + crate::Snapshot::::voters(page).ok_or(FeasibilityError::SnapshotUnavailable)?; + let desired_targets = + crate::Snapshot::::desired_targets().ok_or(FeasibilityError::SnapshotUnavailable)?; + + feasibility_check_page_inner_with_snapshot::( + partial_solution, + &snapshot_voters, + &snapshot_targets, + desired_targets, + ) + .and_then(|miner_supports| { + SupportsOfVerifier::::try_from_other_bounds(miner_supports) + .defensive_map_err(|_| FeasibilityError::FailedToBoundSupport) + }) + } + + #[cfg(any(test, feature = "runtime-benchmarks", feature = "try-runtime"))] + pub(crate) fn do_try_state(_now: BlockNumberFor) -> Result<(), sp_runtime::TryRuntimeError> { + QueuedSolution::::sanity_check() + } +} + +/// Same as `feasibility_check_page_inner`, but with a snapshot. +/// +/// This is exported as a standalone function, relying on `MinerConfig` rather than `Config` so that +/// it can be used in any offchain miner. +pub fn feasibility_check_page_inner_with_snapshot( + partial_solution: SolutionOf, + snapshot_voters: &BoundedVec, T::VoterSnapshotPerBlock>, + snapshot_targets: &BoundedVec, + desired_targets: u32, +) -> Result, FeasibilityError> { + // ----- Start building. First, we need some closures. + let cache = helpers::generate_voter_cache::(snapshot_voters); + let voter_at = helpers::voter_at_fn::(snapshot_voters); + let target_at = helpers::target_at_fn::(snapshot_targets); + let voter_index = helpers::voter_index_fn_usize::(&cache); + + // Then convert solution -> assignment. This will fail if any of the indices are + // gibberish. + let assignments = partial_solution + .into_assignment(voter_at, target_at) + .map_err::(Into::into)?; + + // Ensure that assignments are all correct. + let _ = assignments + .iter() + .map(|ref assignment| { + // Check that assignment.who is actually a voter (defensive-only). 
NOTE: while + // using the index map from `voter_index` is better than a blind linear search, + // this *still* has room for optimization. Note that we had the index when we + // did `solution -> assignment` and we lost it. Ideal is to keep the index + // around. + + // Defensive-only: must exist in the snapshot. + let snapshot_index = + voter_index(&assignment.who).ok_or(FeasibilityError::InvalidVoter)?; + // Defensive-only: index comes from the snapshot, must exist. + let (_voter, _stake, targets) = + snapshot_voters.get(snapshot_index).ok_or(FeasibilityError::InvalidVoter)?; + debug_assert!(*_voter == assignment.who); + + // Check that all of the targets are valid based on the snapshot. + if assignment.distribution.iter().any(|(t, _)| !targets.contains(t)) { + return Err(FeasibilityError::InvalidVote) + } + Ok(()) + }) + .collect::>()?; + + // ----- Start building support. First, we need one more closure. + let stake_of = helpers::stake_of_fn::(&snapshot_voters, &cache); + + // This might fail if the normalization fails. Very unlikely. See `integrity_test`. + let staked_assignments = + sp_npos_elections::assignment_ratio_to_staked_normalized(assignments, stake_of) + .map_err::(Into::into)?; + + let supports = sp_npos_elections::to_supports(&staked_assignments); + + // Ensure some heuristics. These conditions must hold in the **entire** support, this is + // just a single page. But, they must hold in a single page as well. + ensure!((supports.len() as u32) <= desired_targets, FeasibilityError::WrongWinnerCount); + + // almost-defensive-only: `MaxBackersPerWinner` is already checked. A sane value of + // `MaxWinnersPerPage` should be more than any possible value of `desired_targets()`, which + // is ALSO checked, so this conversion can almost never fail. 
+ let bounded_supports = + supports.try_into().map_err(|_| FeasibilityError::FailedToBoundSupport)?; + Ok(bounded_supports) +} + +impl Verifier for Pallet { + type AccountId = T::AccountId; + type Solution = SolutionOf; + type MaxBackersPerWinner = T::MaxBackersPerWinner; + type MaxWinnersPerPage = T::MaxWinnersPerPage; + type MaxBackersPerWinnerFinal = T::MaxBackersPerWinnerFinal; + + fn set_minimum_score(score: ElectionScore) { + MinimumScore::::put(score); + } + + fn ensure_claimed_score_improves(claimed_score: ElectionScore) -> bool { + Self::ensure_score_quality(claimed_score).is_ok() + } + + fn queued_score() -> Option { + QueuedSolution::::queued_score() + } + + fn kill() { + QueuedSolution::::kill(); + >::put(Status::Nothing); + } + + fn get_queued_solution_page(page: PageIndex) -> Option> { + QueuedSolution::::get_queued_solution_page(page) + } + + fn verify_synchronous_multi( + partial_solutions: Vec, + solution_pages: Vec, + claimed_score: ElectionScore, + ) -> Result<(), FeasibilityError> { + Self::do_verify_synchronous_multi(partial_solutions, solution_pages, claimed_score).map_err( + |(page, fe)| { + sublog!( + warn, + "verifier", + "sync verification of page {:?} failed due to {:?}.", + page, + fe + ); + Self::deposit_event(Event::::VerificationFailed(page, fe.clone())); + fe + }, + ) + } + + fn force_set_single_page_valid( + partial_supports: SupportsOfVerifier, + page: PageIndex, + score: ElectionScore, + ) { + Self::deposit_event(Event::::Queued(score, QueuedSolution::::queued_score())); + QueuedSolution::::force_set_single_page_valid(page, partial_supports, score); + } +} + +impl AsynchronousVerifier for Pallet { + type SolutionDataProvider = T::SolutionDataProvider; + + fn status() -> Status { + Pallet::::status_storage() + } + + fn start() -> Result<(), &'static str> { + sublog!(info, "verifier", "start signal received."); + if let Status::Nothing = Self::status() { + let claimed_score = 
Self::SolutionDataProvider::get_score().unwrap_or_default(); + if Self::ensure_score_quality(claimed_score).is_err() { + // don't do anything, report back that this solution was garbage. + Self::deposit_event(Event::::VerificationFailed( + crate::Pallet::::msp(), + FeasibilityError::ScoreTooLow, + )); + T::SolutionDataProvider::report_result(VerificationResult::Rejected); + // Despite being an instant-reject, this was a successful `start` operation. + Ok(()) + } else { + // This solution is good enough to win, we start verifying it in the next block. + StatusStorage::::put(Status::Ongoing(crate::Pallet::::msp())); + Ok(()) + } + } else { + sublog!(warn, "verifier", "start signal received while busy. This will be ignored."); + Err("verification ongoing") + } + } + + fn stop() { + sublog!(warn, "verifier", "stop signal received. clearing everything."); + + // we clear any ongoing solution's no been verified in any case, although this should only + // exist if we were doing something. + #[cfg(debug_assertions)] + assert!( + !matches!(StatusStorage::::get(), Status::Ongoing(_)) || + (matches!(StatusStorage::::get(), Status::Ongoing(_)) && + QueuedSolution::::invalid_iter().count() > 0) + ); + QueuedSolution::::clear_invalid_and_backings_unchecked(); + + // we also mutate the status back to doing nothing. + StatusStorage::::mutate(|old| { + if matches!(old, Status::Ongoing(_)) { + T::SolutionDataProvider::report_result(VerificationResult::Rejected) + } + *old = Status::Nothing; + }); + } +} diff --git a/substrate/frame/election-provider-multi-block/src/verifier/mod.rs b/substrate/frame/election-provider-multi-block/src/verifier/mod.rs new file mode 100644 index 0000000000000..0c3c33aba6e52 --- /dev/null +++ b/substrate/frame/election-provider-multi-block/src/verifier/mod.rs @@ -0,0 +1,297 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # The Verifier Pallet +//! +//! This pallet has no end-user functionality, and is only used internally by other pallets in the +//! EPMB machinery to verify solutions. +//! +//! ### *Feasibility* Check +//! +//! Before explaining the pallet itself, it should be explained what a *verification* even means. +//! Verification of a solution page ([`crate::unsigned::miner::MinerConfig::Solution`]) includes the +//! process of checking all of its edges against a snapshot to be correct. For instance, all voters +//! that are presented in a solution page must have actually voted for the winner that they are +//! backing, based on the snapshot kept in the parent pallet. +//! +//! Such checks are bound to each page of the solution, and happen per-page. After checking all of +//! the edges in each page, a handful of other checks are performed. These checks cannot happen +//! per-page, and in order to do them we need to have the entire solution checked and verified. +//! +//! 1. Check that the total number of winners is sufficient (`DesiredTargets`). +//! 2. Check that the claimed score ([`sp_npos_elections::ElectionScore`]) is correct, +//! * and more than the minimum score that can be specified via [`Verifier::set_minimum_score`]. +//! 3. Check that all of the bounds of the solution are respected, namely +//! 
[`Verifier::MaxBackersPerWinner`], [`Verifier::MaxWinnersPerPage`] and +//! [`Verifier::MaxBackersPerWinnerFinal`]. +//! +//! Note that the common factor of all of the above checks is that they can ONLY be checked after +//! all pages are already verified. So, in the case of a multi-page verification, these checks are +//! performed at the last page. +//! +//! The errors that can arise while performing the feasibility check are encapsulated in +//! [`verifier::FeasibilityError`]. +//! +//! ## Modes of Verification +//! +//! The verifier pallet provides two modes of functionality: +//! +//! 1. Single or multi-page, synchronous verification. This is useful in the context of single-page, +//! emergency, or unsigned solutions that need to be verified on the fly. This is similar to how +//! the old school `multi-phase` pallet works. See [`Verifier::verify_synchronous`] and +//! [`Verifier::verify_synchronous_multi`]. +//! 2. Multi-page, asynchronous verification. This is useful in the context of multi-page, signed +//! solutions. See [`verifier::AsynchronousVerifier`] and [`verifier::SolutionDataProvider`]. +//! +//! Both of these, plus some helper functions, are exposed via the [`verifier::Verifier`] trait. +//! +//! ## Queued Solution +//! +//! Once a solution has been verified, it is called a *queued solution*. It is sitting in a queue, +//! waiting for either of: +//! +//! 1. being challenged and potentially replaced by a better solution, if any. +//! 2. being exported as the final outcome of the election.
+ +#[cfg(feature = "runtime-benchmarks")] +pub mod benchmarking; +mod impls; +#[cfg(test)] +mod tests; + +// internal imports +pub use crate::weights::measured::pallet_election_provider_multi_block_verifier::*; + +use frame_election_provider_support::PageIndex; +use impls::SupportsOfVerifier; +pub use impls::{feasibility_check_page_inner_with_snapshot, pallet::*, Status}; +use sp_core::Get; +use sp_npos_elections::ElectionScore; +use sp_std::{fmt::Debug, prelude::*}; + +/// Errors that can happen in the feasibility check. +#[derive( + Debug, + Eq, + PartialEq, + codec::Encode, + codec::Decode, + codec::DecodeWithMemTracking, + scale_info::TypeInfo, + Clone, +)] +pub enum FeasibilityError { + /// Wrong number of winners presented. + WrongWinnerCount, + /// The snapshot is not available. + /// + /// Kinda defensive: The pallet should technically never attempt to do a feasibility check + /// when no snapshot is present. + SnapshotUnavailable, + /// A vote is invalid. + InvalidVote, + /// A voter is invalid. + InvalidVoter, + /// A winner is invalid. + InvalidWinner, + /// The given score was invalid. + InvalidScore, + /// The provided round is incorrect. + InvalidRound, + /// Solution does not have a good enough score. + ScoreTooLow, + /// The support type failed to be bounded. + /// + /// Relates to [`Config::MaxWinnersPerPage`], [`Config::MaxBackersPerWinner`] or + /// `MaxBackersPerWinnerFinal` + FailedToBoundSupport, + /// Internal error from the election crate. + NposElection(sp_npos_elections::Error), + /// The solution is incomplete, it has too few pages. + /// + /// This is (somewhat) synonym to `WrongPageCount` in other places. + Incomplete, +} + +impl From for FeasibilityError { + fn from(e: sp_npos_elections::Error) -> Self { + FeasibilityError::NposElection(e) + } +} + +/// The interface of something that can verify solutions for other sub-pallets in the multi-block +/// election pallet-network. +pub trait Verifier { + /// The solution type. 
+ type Solution; + /// The account id type. + type AccountId; + + /// Maximum number of winners that can be represented in each page. + /// + /// A reasonable value for this should be the maximum number of winners that the election user + /// (e.g. the staking pallet) could ever desire. + type MaxWinnersPerPage: Get; + /// Maximum number of backers, per winner, among all pages of an election. + /// + /// This can only be checked at the very final step of verification. + type MaxBackersPerWinnerFinal: Get; + /// Maximum number of backers that each winner could have, per page. + type MaxBackersPerWinner: Get; + + /// Set the minimum score that is acceptable for any solution. + /// + /// Henceforth, all solutions must have at least this degree of quality, single-page or + /// multi-page. + fn set_minimum_score(score: ElectionScore); + + /// The score of the current best solution. `None` if there is none. + fn queued_score() -> Option; + + /// Check if the claimed score is sufficient to challenge the current queued solution, if any. + fn ensure_claimed_score_improves(claimed_score: ElectionScore) -> bool; + + /// Clear all storage items, there's nothing else to do until further notice. + fn kill(); + + /// Get a single page of the best verified solution, if any. + /// + /// It is the responsibility of the call site to call this function with all appropriate + /// `page` arguments. + fn get_queued_solution_page(page: PageIndex) -> Option>; + + /// Perform the feasibility check on the given single-page solution. + /// + /// This will perform: + /// + /// 1. feasibility-check + /// 2. claimed score is correct and an improvement. + /// 3. bounds are respected + /// + /// Corresponding snapshot (represented by `page`) is assumed to be available. + /// + /// If all checks pass, the solution is also queued.
+ fn verify_synchronous( + partial_solution: Self::Solution, + claimed_score: ElectionScore, + page: PageIndex, + ) -> Result<(), FeasibilityError> { + Self::verify_synchronous_multi(vec![partial_solution], vec![page], claimed_score) + } + + /// Perform synchronous feasibility check on the given multi-page solution. + /// + /// Same semantics as [`Self::verify_synchronous`], but for multi-page solutions. + fn verify_synchronous_multi( + partial_solution: Vec, + pages: Vec, + claimed_score: ElectionScore, + ) -> Result<(), FeasibilityError>; + + /// Force set a single page solution as the valid one. + /// + /// Will erase any previous solution. Should only be used in case of emergency fallbacks, + /// trusted governance solutions and so on. + fn force_set_single_page_valid( + partial_supports: SupportsOfVerifier, + page: PageIndex, + score: ElectionScore, + ); +} + +/// Simple enum to encapsulate the result of the verification of a candidate solution. +#[derive(Clone, Copy, Debug)] +#[cfg_attr(test, derive(PartialEq, Eq))] +pub enum VerificationResult { + /// Solution is valid and is queued. + Queued, + /// Solution is rejected, for whichever of the multiple reasons that it could be. + Rejected, + /// The data needed (solution pages or the score) was unavailable. This should rarely happen. + DataUnavailable, +} + +/// Something that can provide candidate solutions to the verifier. +/// +/// In reality, this can be implemented by the [`crate::signed::Pallet`], where signed solutions are +/// queued and sorted based on claimed score, and they are put forth one by one, from best to worse. +pub trait SolutionDataProvider { + /// The opaque solution type. + type Solution; + + /// Return the `page`th page of the current best solution that the data provider has in store. + /// + /// If no candidate solutions are available, then None is returned. + fn get_page(page: PageIndex) -> Option; + + /// Get the claimed score of the current best solution. 
+ fn get_score() -> Option; + + /// Hook to report back the results of the verification of the current candidate solution that + /// is being exposed via [`Self::get_page`] and [`Self::get_score`]. + /// + /// Every time that this is called, the verifier [`AsynchronousVerifier`] goes back to the + /// [`Status::Nothing`] state, and it is the responsibility of [`Self`] to call `start` again, + /// if desired. + fn report_result(result: VerificationResult); +} + +/// Something that can do the verification asynchronously. +pub trait AsynchronousVerifier: Verifier { + /// The data provider that can provide the candidate solution, and to whom we report back the + /// results. + type SolutionDataProvider: SolutionDataProvider; + + /// Get the current stage of the verification process. + fn status() -> Status; + + /// Start a verification process. + /// + /// Returns `Ok(())` if verification started successfully, and `Err(..)` if a verification is + /// already ongoing and therefore a new one cannot be started. + /// + /// From the coming block onwards, the verifier will start and fetch the relevant information + /// and solution pages from [`SolutionDataProvider`]. It is expected that the + /// [`SolutionDataProvider`] is ready before calling [`Self::start`]. + /// + /// Pages of the solution are fetched sequentially and in order from [`SolutionDataProvider`], + /// from `msp` to `lsp`. + /// + /// This ends in either of the two: + /// + /// 1. All pages, including the final checks (like score and other facts that can only be + /// derived from a full solution) are valid and the solution is verified. The solution is + /// queued and is ready for further export. + /// 2. The solution fails verification at one of the steps. Nothing is stored inside the + /// verifier pallet and all intermediary data is removed. + /// + /// In both cases, the [`SolutionDataProvider`] is informed via + /// [`SolutionDataProvider::report_result`].
It is sensible for the data provider to call `start` + /// again if the verification has failed, and nothing otherwise. Indeed, the + /// [`SolutionDataProvider`] must adjust its internal state such that it returns a new candidate + /// solution after each failure. + fn start() -> Result<(), &'static str>; + + /// Stop the verification. + /// + /// This is a force-stop operation, and should only be used in extreme cases where the + /// [`SolutionDataProvider`] wants to suddenly bail-out. + /// + /// An implementation should make sure that no loose ends remain state-wise, and everything is + /// cleaned. + fn stop(); +} diff --git a/substrate/frame/election-provider-multi-block/src/verifier/tests.rs b/substrate/frame/election-provider-multi-block/src/verifier/tests.rs new file mode 100644 index 0000000000000..9dfc056881417 --- /dev/null +++ b/substrate/frame/election-provider-multi-block/src/verifier/tests.rs @@ -0,0 +1,1496 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +use crate::{ + mock::{ + fake_solution, mine_solution, roll_to_snapshot_created, solution_from_supports, + verifier_events, ExtBuilder, MaxBackersPerWinner, MaxWinnersPerPage, MultiBlock, Runtime, + VerifierPallet, *, + }, + verifier::{impls::Status, Event, FeasibilityError, Verifier, *}, + PagedRawSolution, Snapshot, *, +}; +use frame_election_provider_support::Support; +use frame_support::{assert_noop, assert_ok}; +use sp_core::bounded_vec; +use sp_npos_elections::ElectionScore; +use sp_runtime::{traits::Bounded, Perbill}; + +mod feasibility_check { + use super::*; + + #[test] + fn missing_snapshot() { + ExtBuilder::verifier().build_unchecked().execute_with(|| { + // create snapshot just so that we can create a solution.. + roll_to_snapshot_created(); + let paged = mine_full_solution().unwrap(); + + // ..remove the only page of the target snapshot. + crate::Snapshot::::remove_target_page(); + + assert_noop!( + VerifierPallet::feasibility_check_page_inner(paged.solution_pages[0].clone(), 0), + FeasibilityError::SnapshotUnavailable + ); + }); + + ExtBuilder::verifier().pages(2).build_unchecked().execute_with(|| { + roll_to_snapshot_created(); + let paged = mine_full_solution().unwrap(); + + // ..remove just one of the pages of voter snapshot that is relevant. + crate::Snapshot::::remove_voter_page(0); + + assert_noop!( + VerifierPallet::feasibility_check_page_inner(paged.solution_pages[0].clone(), 0), + FeasibilityError::SnapshotUnavailable + ); + }); + + ExtBuilder::verifier().pages(2).build_unchecked().execute_with(|| { + roll_to_snapshot_created(); + let paged = mine_full_solution().unwrap(); + + // ..removing this page is not important, because we check page 0. 
+ crate::Snapshot::::remove_voter_page(1); + + assert_ok!(VerifierPallet::feasibility_check_page_inner( + paged.solution_pages[0].clone(), + 0 + )); + }); + + ExtBuilder::verifier().pages(2).build_unchecked().execute_with(|| { + roll_to_snapshot_created(); + let paged = mine_full_solution().unwrap(); + + // `DesiredTargets` missing is also an error + crate::Snapshot::::kill_desired_targets(); + + assert_noop!( + VerifierPallet::feasibility_check_page_inner(paged.solution_pages[0].clone(), 0), + FeasibilityError::SnapshotUnavailable + ); + }); + + ExtBuilder::verifier().pages(2).build_unchecked().execute_with(|| { + roll_to_snapshot_created(); + let paged = mine_full_solution().unwrap(); + + // `DesiredTargets` is not checked here. + crate::Snapshot::::remove_target_page(); + + assert_noop!( + VerifierPallet::feasibility_check_page_inner(paged.solution_pages[1].clone(), 0), + FeasibilityError::SnapshotUnavailable + ); + }); + } + + #[test] + fn winner_indices_single_page_must_be_in_bounds() { + ExtBuilder::verifier().pages(1).desired_targets(2).build_and_execute(|| { + roll_to_snapshot_created(); + let mut paged = mine_full_solution().unwrap(); + assert_eq!(crate::Snapshot::::targets().unwrap().len(), 4); + // ----------------------------------------------------^^ valid range is [0..3]. + + // Swap all votes from 3 to 4. here are only 4 targets, so index 4 is invalid. 
+ paged.solution_pages[0] + .votes1 + .iter_mut() + .filter(|(_, t)| *t == TargetIndex::from(3u16)) + .for_each(|(_, t)| *t += 1); + + assert_noop!( + VerifierPallet::feasibility_check_page_inner(paged.solution_pages[0].clone(), 0), + FeasibilityError::NposElection(sp_npos_elections::Error::SolutionInvalidIndex) + ); + }) + } + + #[test] + fn voter_indices_per_page_must_be_in_bounds() { + ExtBuilder::verifier() + .pages(1) + .voter_per_page(Bounded::max_value()) + .desired_targets(2) + .build_and_execute(|| { + roll_to_snapshot_created(); + let mut paged = mine_full_solution().unwrap(); + + assert_eq!(crate::Snapshot::::voters(0).unwrap().len(), 12); + // ------------------------------------------------^^ valid range is [0..11] in page + // 0. + + // Check that there is an index 11 in votes1, and flip to 12. There are only 12 + // voters, so index 12 is invalid. + assert!( + paged.solution_pages[0] + .votes1 + .iter_mut() + .filter(|(v, _)| *v == VoterIndex::from(11u32)) + .map(|(v, _)| *v = 12) + .count() > 0 + ); + assert_noop!( + VerifierPallet::feasibility_check_page_inner( + paged.solution_pages[0].clone(), + 0 + ), + FeasibilityError::NposElection(sp_npos_elections::Error::SolutionInvalidIndex), + ); + }) + } + + #[test] + fn voter_must_have_same_targets_as_snapshot() { + ExtBuilder::verifier() + .pages(1) + .voter_per_page(Bounded::max_value()) + .desired_targets(2) + .build_and_execute(|| { + roll_to_snapshot_created(); + let mut paged = mine_full_solution().unwrap(); + + // First, check that voter at index 11 (40) actually voted for 3 (40) -- this is + // self vote. Then, change the vote to 2 (30). 
+ assert_eq!( + paged.solution_pages[0] + .votes1 + .iter_mut() + .filter(|(v, t)| *v == 11 && *t == 3) + .map(|(_, t)| *t = 2) + .count(), + 1, + ); + assert_noop!( + VerifierPallet::feasibility_check_page_inner( + paged.solution_pages[0].clone(), + 0 + ), + FeasibilityError::InvalidVote, + ); + }) + } + + #[test] + fn heuristic_max_backers_per_winner_per_page() { + ExtBuilder::verifier().max_backers_per_winner(2).build_and_execute(|| { + roll_to_snapshot_created(); + + // these votes are all valid, but some dude has 3 supports in a single page. + let solution = solution_from_supports( + vec![(40, Support { total: 30, voters: vec![(2, 10), (3, 10), (4, 10)] })], + // all these voters are in page of the snapshot, the msp! + 2, + ); + + assert_noop!( + VerifierPallet::feasibility_check_page_inner(solution, 2), + FeasibilityError::FailedToBoundSupport, + ); + }) + } + + #[test] + fn heuristic_desired_target_check_per_page() { + ExtBuilder::verifier().desired_targets(2).build_and_execute(|| { + roll_to(25); + assert_full_snapshot(); + + // all of these votes are valid, but this solution is already presenting 3 winners, + // while we just one 2. + let solution = solution_from_supports( + vec![ + (10, Support { total: 30, voters: vec![(4, 2)] }), + (20, Support { total: 30, voters: vec![(4, 2)] }), + (40, Support { total: 30, voters: vec![(4, 6)] }), + ], + // all these voters are in page 2 of the snapshot, the msp! + 2, + ); + + assert_noop!( + VerifierPallet::feasibility_check_page_inner(solution, 2), + FeasibilityError::WrongWinnerCount, + ); + }) + } +} + +mod async_verification { + use super::*; + use sp_core::bounded_vec; + // disambiguate event + use crate::verifier::Event; + + #[test] + fn basic_single_verification_works() { + ExtBuilder::verifier().pages(1).build_and_execute(|| { + // load a solution after the snapshot has been created. 
+ roll_to_snapshot_created(); + + let solution = mine_full_solution().unwrap(); + load_mock_signed_and_start(solution.clone()); + + // now let it verify + roll_next(); + + // It done after just one block. + assert_eq!(VerifierPallet::status(), Status::Nothing); + assert_eq!( + verifier_events(), + vec![ + Event::::Verified(0, 2), + Event::::Queued(solution.score, None) + ] + ); + assert_eq!(MockSignedResults::get(), vec![VerificationResult::Queued]); + }); + } + + #[test] + fn basic_multi_verification_works() { + ExtBuilder::verifier().pages(3).build_and_execute(|| { + // load a solution after the snapshot has been created. + roll_to_snapshot_created(); + + let solution = mine_full_solution().unwrap(); + // ------------- ^^^^^^^^^^^^ + + load_mock_signed_and_start(solution.clone()); + assert_eq!(VerifierPallet::status(), Status::Ongoing(2)); + assert_eq!(QueuedSolution::::valid_iter().count(), 0); + + // now let it verify + roll_next(); + assert_eq!(VerifierPallet::status(), Status::Ongoing(1)); + assert_eq!(verifier_events(), vec![Event::::Verified(2, 2)]); + // 1 page verified, stored as invalid. + assert_eq!(QueuedSolution::::invalid_iter().count(), 1); + + roll_next(); + assert_eq!(VerifierPallet::status(), Status::Ongoing(0)); + assert_eq!( + verifier_events(), + vec![Event::::Verified(2, 2), Event::::Verified(1, 2),] + ); + // 2 pages verified, stored as invalid. + assert_eq!(QueuedSolution::::invalid_iter().count(), 2); + + // nothing is queued yet. + assert_eq!(MockSignedResults::get(), vec![]); + assert_eq!(QueuedSolution::::valid_iter().count(), 0); + assert!(QueuedSolution::::queued_score().is_none()); + + // last block. 
+ roll_next(); + assert_eq!(VerifierPallet::status(), Status::Nothing); + assert_eq!( + verifier_events(), + vec![ + Event::::Verified(2, 2), + Event::::Verified(1, 2), + Event::::Verified(0, 2), + Event::::Queued(solution.score, None), + ] + ); + assert_eq!(MockSignedResults::get(), vec![VerificationResult::Queued]); + + // a solution has been queued + assert_eq!(QueuedSolution::::valid_iter().count(), 3); + assert!(QueuedSolution::::queued_score().is_some()); + }); + } + + #[test] + fn basic_multi_verification_partial() { + ExtBuilder::verifier().pages(3).build_and_execute(|| { + // load a solution after the snapshot has been created. + roll_to_snapshot_created(); + + let solution = mine_solution(2).unwrap(); + // -------------------------^^^ + + load_mock_signed_and_start(solution.clone()); + + assert_eq!(VerifierPallet::status(), Status::Ongoing(2)); + assert_eq!(QueuedSolution::::valid_iter().count(), 0); + + // now let it verify + roll_next(); + assert_eq!(VerifierPallet::status(), Status::Ongoing(1)); + assert_eq!(verifier_events(), vec![Event::::Verified(2, 2)]); + // 1 page verified, stored as invalid. + assert_eq!(QueuedSolution::::invalid_iter().count(), 1); + + roll_next(); + assert_eq!(VerifierPallet::status(), Status::Ongoing(0)); + assert_eq!( + verifier_events(), + vec![Event::::Verified(2, 2), Event::::Verified(1, 2),] + ); + // 2 page verified, stored as invalid. + assert_eq!(QueuedSolution::::invalid_iter().count(), 2); + + // nothing is queued yet. + assert_eq!(MockSignedResults::get(), vec![]); + assert_eq!(QueuedSolution::::valid_iter().count(), 0); + assert!(QueuedSolution::::queued_score().is_none()); + + roll_next(); + assert_eq!(VerifierPallet::status(), Status::Nothing); + + assert_eq!( + verifier_events(), + vec![ + Event::::Verified(2, 2), + Event::::Verified(1, 2), + // this is a partial solution, no one in this page (lsp). 
+ Event::::Verified(0, 0), + Event::::Queued(solution.score, None), + ] + ); + + // a solution has been queued + assert_eq!(MockSignedResults::get(), vec![VerificationResult::Queued]); + assert_eq!(QueuedSolution::::valid_iter().count(), 3); + assert!(QueuedSolution::::queued_score().is_some()); + + // page 0 is empty.. + assert_eq!(QueuedSolution::::get_valid_page(0).unwrap().len(), 0); + // .. the other two are not. + assert_eq!(QueuedSolution::::get_valid_page(1).unwrap().len(), 2); + assert_eq!(QueuedSolution::::get_valid_page(2).unwrap().len(), 2); + }); + } + + #[test] + fn solution_data_provider_failing_initial() { + ExtBuilder::verifier().build_and_execute(|| { + // not super important, but anyways.. + roll_to_snapshot_created(); + + // The solution data provider is empty. + assert_eq!(SignedPhaseSwitch::get(), SignedSwitch::Mock); + assert_eq!(MockSignedNextSolution::get(), None); + + // nothing happens.. + assert_eq!(VerifierPallet::status(), Status::Nothing); + assert_ok!(::start()); + assert_eq!(VerifierPallet::status(), Status::Ongoing(2)); + + roll_next(); + + // we instantly stop. + assert_eq!(verifier_events(), vec![Event::::VerificationDataUnavailable]); + assert_eq!(VerifierPallet::status(), Status::Nothing); + assert!(QueuedSolution::::invalid_iter().count().is_zero()); + assert!(QueuedSolution::::backing_iter().count().is_zero()); + + // and we report invalid back. + assert_eq!(MockSignedResults::get(), vec![VerificationResult::DataUnavailable]); + }); + } + + #[test] + fn solution_data_provider_failing_midway() { + ExtBuilder::verifier().build_and_execute(|| { + roll_to_snapshot_created(); + + let solution = mine_full_solution().unwrap(); + load_mock_signed_and_start(solution.clone()); + + assert_eq!(VerifierPallet::status(), Status::Ongoing(2)); + + // now let it verify. first one goes fine. 
+ roll_next(); + assert_eq!(VerifierPallet::status(), Status::Ongoing(1)); + assert_eq!(verifier_events(), vec![Event::::Verified(2, 2)]); + assert_eq!(MockSignedResults::get(), vec![]); + + // 1 page verified, stored as invalid. + assert_eq!(QueuedSolution::::invalid_iter().count(), 1); + assert_eq!(QueuedSolution::::backing_iter().count(), 1); + assert_eq!(QueuedSolution::::valid_iter().count(), 0); + + // suddenly clear this guy. + MockSignedNextSolution::set(None); + MockSignedNextScore::set(None); + + roll_next(); + + // we instantly stop. + assert_eq!( + verifier_events(), + vec![ + Event::::Verified(2, 2), + Event::::VerificationDataUnavailable + ] + ); + assert_eq!(VerifierPallet::status(), Status::Nothing); + assert_eq!(QueuedSolution::::invalid_iter().count(), 0); + assert_eq!(QueuedSolution::::valid_iter().count(), 0); + assert_eq!(QueuedSolution::::backing_iter().count(), 0); + + // and we report invalid back. + assert_eq!(MockSignedResults::get(), vec![VerificationResult::DataUnavailable]); + }) + } + + #[test] + fn rejects_new_verification_via_start_if_ongoing() { + ExtBuilder::verifier().build_and_execute(|| { + roll_to_snapshot_created(); + + let solution = mine_full_solution().unwrap(); + load_mock_signed_and_start(solution.clone()); + + assert_eq!(VerifierPallet::status(), Status::Ongoing(2)); + + // nada + assert_noop!(::start(), "verification ongoing"); + + // now let it verify. first one goes fine. + roll_next(); + assert_eq!(VerifierPallet::status(), Status::Ongoing(1)); + assert_eq!(verifier_events(), vec![Event::::Verified(2, 2)]); + assert_eq!(MockSignedResults::get(), vec![]); + + // retry, still nada. 
+ assert_noop!(::start(), "verification ongoing"); + }) + } + + #[test] + fn stop_clears_everything() { + ExtBuilder::verifier().build_and_execute(|| { + roll_to_snapshot_created(); + + let solution = mine_full_solution().unwrap(); + load_mock_signed_and_start(solution.clone()); + + assert_eq!(VerifierPallet::status(), Status::Ongoing(2)); + + roll_next(); + assert_eq!(VerifierPallet::status(), Status::Ongoing(1)); + assert_eq!(verifier_events(), vec![Event::::Verified(2, 2)]); + + roll_next(); + assert_eq!(VerifierPallet::status(), Status::Ongoing(0)); + assert_eq!( + verifier_events(), + vec![Event::::Verified(2, 2), Event::::Verified(1, 2)] + ); + + // now suddenly, we stop + ::stop(); + assert_eq!(VerifierPallet::status(), Status::Nothing); + + // everything is cleared. + assert_eq!(QueuedSolution::::invalid_iter().count(), 0); + assert_eq!(QueuedSolution::::valid_iter().count(), 0); + assert_eq!(QueuedSolution::::backing_iter().count(), 0); + + // and we report invalid back that something was rejected. + assert_eq!(MockSignedResults::get(), vec![VerificationResult::Rejected]); + }) + } + + #[test] + fn weak_valid_solution_is_insta_rejected() { + ExtBuilder::verifier().build_and_execute(|| { + roll_to_snapshot_created(); + + let paged = mine_full_solution().unwrap(); + load_mock_signed_and_start(paged.clone()); + let _ = roll_to_full_verification(); + + assert_eq!( + verifier_events(), + vec![ + Event::Verified(2, 2), + Event::Verified(1, 2), + Event::Verified(0, 2), + Event::Queued(paged.score, None) + ] + ); + assert_eq!(MockSignedResults::get(), vec![VerificationResult::Queued]); + + // good boi, but you are too weak. This solution also does not have the full pages, + // which is also fine. See `basic_multi_verification_partial`. 
+ let weak_page_partial = + solution_from_supports(vec![(10, Support { total: 10, voters: vec![(1, 10)] })], 2); + let weak_paged = PagedRawSolution:: { + solution_pages: bounded_vec![weak_page_partial], + score: ElectionScore { minimal_stake: 10, sum_stake: 10, sum_stake_squared: 100 }, + ..Default::default() + }; + + load_mock_signed_and_start(weak_paged.clone()); + // this is insta-rejected, no need to proceed any more blocks. + + assert_eq!( + verifier_events(), + vec![ + Event::Verified(2, 2), + Event::Verified(1, 2), + Event::Verified(0, 2), + Event::Queued(paged.score, None), + Event::VerificationFailed(2, FeasibilityError::ScoreTooLow) + ] + ); + + assert_eq!( + MockSignedResults::get(), + vec![VerificationResult::Queued, VerificationResult::Rejected] + ); + }) + } + + #[test] + fn better_valid_solution_replaces() { + ExtBuilder::verifier().build_and_execute(|| { + roll_to_snapshot_created(); + + // a weak one, which we will still accept. + let weak_page_partial = solution_from_supports( + vec![ + (10, Support { total: 10, voters: vec![(1, 10)] }), + (20, Support { total: 10, voters: vec![(4, 10)] }), + ], + 2, + ); + let weak_paged = PagedRawSolution:: { + solution_pages: bounded_vec![weak_page_partial], + score: ElectionScore { minimal_stake: 10, sum_stake: 20, sum_stake_squared: 200 }, + ..Default::default() + }; + + load_mock_signed_and_start(weak_paged.clone()); + let _ = roll_to_full_verification(); + + assert_eq!( + verifier_events(), + vec![ + Event::Verified(2, 2), + Event::Verified(1, 0), // note: partial solution! + Event::Verified(0, 0), // note: partial solution! 
+ Event::Queued(weak_paged.score, None) + ] + ); + assert_eq!(MockSignedResults::get(), vec![VerificationResult::Queued]); + + let paged = mine_full_solution().unwrap(); + load_mock_signed_and_start(paged.clone()); + let _ = roll_to_full_verification(); + + assert_eq!( + verifier_events(), + vec![ + Event::Verified(2, 2), + Event::Verified(1, 0), + Event::Verified(0, 0), + Event::Queued(weak_paged.score, None), + Event::Verified(2, 2), + Event::Verified(1, 2), + Event::Verified(0, 2), + Event::Queued(paged.score, Some(weak_paged.score)) + ] + ); + assert_eq!( + MockSignedResults::get(), + vec![VerificationResult::Queued, VerificationResult::Queued] + ); + }) + } + + #[test] + fn invalid_solution_bad_score() { + ExtBuilder::verifier().build_and_execute(|| { + roll_to_snapshot_created(); + let mut paged = mine_full_solution().unwrap(); + + // just tweak score. + paged.score.minimal_stake += 1; + assert!(::queued_score().is_none()); + + load_mock_signed_and_start(paged); + roll_to_full_verification(); + + // nothing is verified. + assert!(::queued_score().is_none()); + assert_eq!( + verifier_events(), + vec![ + Event::::Verified(2, 2), + Event::::Verified(1, 2), + Event::::Verified(0, 2), + Event::::VerificationFailed(0, FeasibilityError::InvalidScore) + ] + ); + + assert_eq!(MockSignedResults::get(), vec![VerificationResult::Rejected]); + }) + } + + #[test] + fn invalid_solution_bad_minimum_score() { + ExtBuilder::verifier().build_and_execute(|| { + roll_to_snapshot_created(); + let paged = mine_full_solution().unwrap(); + + // our minimum score is our score, just a bit better. 
+ let mut better_score = paged.score; + better_score.minimal_stake += 1; + ::set_minimum_score(better_score); + + load_mock_signed_and_start(paged); + + // note that we don't need to call to `roll_to_full_verification`, since this solution + // is pretty much insta-rejected; + assert_eq!( + verifier_events(), + vec![Event::::VerificationFailed(2, FeasibilityError::ScoreTooLow)] + ); + + // nothing is verified.. + assert!(::queued_score().is_none()); + + // result is reported back. + assert_eq!(MockSignedResults::get(), vec![VerificationResult::Rejected]); + }) + } + + #[test] + fn invalid_solution_bad_desired_targets() { + ExtBuilder::verifier().build_and_execute(|| { + roll_to_snapshot_created(); + assert_eq!(crate::Snapshot::::desired_targets().unwrap(), 2); + let paged = mine_full_solution().unwrap(); + + // tweak this, for whatever reason. + crate::Snapshot::::set_desired_targets(3); + + load_mock_signed_and_start(paged); + roll_to_full_verification(); + + // we detect this only in the last page. + assert_eq!( + verifier_events(), + vec![ + Event::Verified(2, 2), + Event::Verified(1, 2), + Event::Verified(0, 2), + Event::VerificationFailed(0, FeasibilityError::WrongWinnerCount) + ] + ); + + // nothing is verified.. + assert!(::queued_score().is_none()); + // result is reported back. + assert_eq!(MockSignedResults::get(), vec![VerificationResult::Rejected]); + }) + } + + #[test] + fn invalid_solution_bad_bounds_per_page() { + ExtBuilder::verifier() + .desired_targets(1) + .max_backers_per_winner(1) // in each page we allow 1 baker to be presented. + .build_and_execute(|| { + roll_to_snapshot_created(); + + // This is a sneaky custom solution where it will fail in the second page. 
+ let page0 = solution_from_supports( + vec![(10, Support { total: 10, voters: vec![(1, 10)] })], + 2, + ); + let page1 = solution_from_supports( + vec![(10, Support { total: 20, voters: vec![(5, 10), (8, 10)] })], + 1, + ); + let page2 = solution_from_supports( + vec![(10, Support { total: 10, voters: vec![(10, 10)] })], + 0, + ); + let paged = PagedRawSolution { + solution_pages: bounded_vec![page0, page1, page2], + score: Default::default(), // score is never checked, so nada + ..Default::default() + }; + + load_mock_signed_and_start(paged); + roll_to_full_verification(); + + // we detect the bound issue in page 2. + assert_eq!( + verifier_events(), + vec![ + Event::Verified(2, 1), + Event::VerificationFailed(1, FeasibilityError::FailedToBoundSupport) + ] + ); + + // our state is fully cleaned. + QueuedSolution::::assert_killed(); + assert_eq!(StatusStorage::::get(), Status::Nothing); + // nothing is verified.. + assert!(::queued_score().is_none()); + // result is reported back. + assert_eq!(MockSignedResults::get(), vec![VerificationResult::Rejected]); + }) + } + + #[test] + fn invalid_solution_bad_bounds_final() { + ExtBuilder::verifier() + .desired_targets(1) + .max_backers_per_winner_final(2) + .build_and_execute(|| { + roll_to_snapshot_created(); + + // This is a sneaky custom solution where in each page 10 has 1 backers, so only in + // the last page we can catch the mfer. 
+ let page0 = solution_from_supports( + vec![(10, Support { total: 10, voters: vec![(1, 10)] })], + 2, + ); + let page1 = solution_from_supports( + vec![(10, Support { total: 10, voters: vec![(5, 10)] })], + 1, + ); + let page2 = solution_from_supports( + vec![(10, Support { total: 10, voters: vec![(10, 10)] })], + 0, + ); + let paged = PagedRawSolution { + solution_pages: bounded_vec![page0, page1, page2], + score: ElectionScore { + minimal_stake: 30, + sum_stake: 30, + sum_stake_squared: 900, + }, + ..Default::default() + }; + + load_mock_signed_and_start(paged); + roll_to_full_verification(); + + // we detect this only in the last page. + assert_eq!( + verifier_events(), + vec![ + Event::Verified(2, 1), + Event::Verified(1, 1), + Event::Verified(0, 1), + Event::VerificationFailed(0, FeasibilityError::FailedToBoundSupport) + ] + ); + + // our state is fully cleaned. + QueuedSolution::::assert_killed(); + assert_eq!(StatusStorage::::get(), Status::Nothing); + + // nothing is verified.. + assert!(::queued_score().is_none()); + // result is reported back. + assert_eq!(MockSignedResults::get(), vec![VerificationResult::Rejected]); + }) + } + + #[test] + fn invalid_solution_does_not_alter_queue() { + ExtBuilder::verifier().build_and_execute(|| { + roll_to_snapshot_created(); + let mut paged = mine_full_solution().unwrap(); + let correct_score = paged.score; + + assert!(::queued_score().is_none()); + + load_mock_signed_and_start(paged.clone()); + roll_to_full_verification(); + + assert_eq!(::queued_score(), Some(correct_score)); + assert!(QueuedSolution::::invalid_iter().count().is_zero()); + assert!(QueuedSolution::::backing_iter().count().is_zero()); + + // just tweak score. Note that we tweak for a higher score, so the verifier will accept + // it. + paged.score.minimal_stake += 1; + load_mock_signed_and_start(paged.clone()); + roll_to_full_verification(); + + // nothing is verified. 
+ assert_eq!(::queued_score(), Some(correct_score)); + assert_eq!( + verifier_events(), + vec![ + Event::::Verified(2, 2), + Event::::Verified(1, 2), + Event::::Verified(0, 2), + Event::::Queued(correct_score, None), + Event::::Verified(2, 2), + Event::::Verified(1, 2), + Event::::Verified(0, 2), + Event::::VerificationFailed(0, FeasibilityError::InvalidScore), + ] + ); + + // the verification results. + assert_eq!( + MockSignedResults::get(), + vec![VerificationResult::Queued, VerificationResult::Rejected] + ); + + // and the queue is still in good shape. + assert_eq!(::queued_score(), Some(correct_score)); + assert!(QueuedSolution::::invalid_iter().count().is_zero()); + assert!(QueuedSolution::::backing_iter().count().is_zero()); + }) + } +} + +mod multi_page_sync_verification { + use super::*; + use frame_support::hypothetically; + + #[test] + fn basic_sync_verification_works() { + ExtBuilder::verifier().build_and_execute(|| { + roll_to_snapshot_created(); + let paged = mine_solution(2).unwrap(); + + assert_eq!(verifier_events(), vec![]); + assert_eq!(::queued_score(), None); + + let _ = ::verify_synchronous_multi( + paged.solution_pages.clone().into_inner(), + MultiBlock::msp_range_for(2), + paged.score, + ) + .unwrap(); + + assert_eq!( + verifier_events(), + vec![ + Event::::Verified(1, 2), + Event::::Verified(2, 2), + Event::::Queued(paged.score, None) + ] + ); + assert_eq!(::queued_score(), Some(paged.score)); + }) + } + + #[test] + fn basic_sync_verification_works_full() { + ExtBuilder::verifier().build_and_execute(|| { + roll_to_snapshot_created(); + let paged = mine_full_solution().unwrap(); + + assert_eq!(verifier_events(), vec![]); + assert_eq!(::queued_score(), None); + + let _ = ::verify_synchronous_multi( + paged.solution_pages.clone().into_inner(), + MultiBlock::msp_range_for(3), + paged.score, + ) + .unwrap(); + + assert_eq!( + verifier_events(), + vec![ + Event::::Verified(0, 2), + Event::::Verified(1, 2), + Event::::Verified(2, 2), + 
Event::::Queued(paged.score, None) + ] + ); + assert_eq!(::queued_score(), Some(paged.score)); + }) + } + + #[test] + fn incorrect_score_checked_at_end() { + ExtBuilder::verifier().build_and_execute(|| { + // A solution that where each individual page is valid, but the final score is bad. + roll_to_snapshot_created(); + let mut paged = mine_solution(2).unwrap(); + paged.score.minimal_stake += 1; + + assert_eq!(verifier_events(), vec![]); + assert_eq!(::queued_score(), None); + + assert_eq!( + ::verify_synchronous_multi( + paged.solution_pages.clone().into_inner(), + MultiBlock::msp_range_for(2), + paged.score, + ) + .unwrap_err(), + FeasibilityError::InvalidScore + ); + + assert_eq!( + verifier_events(), + vec![ + Event::::Verified(1, 2), + Event::::Verified(2, 2), + Event::::VerificationFailed(2, FeasibilityError::InvalidScore), + ] + ); + assert_eq!(::queued_score(), None); + }) + } + + #[test] + fn invalid_second_page() { + ExtBuilder::verifier().build_and_execute(|| { + // A solution that where the second validated page is invalid. 
+ use frame_election_provider_support::traits::NposSolution; + roll_to_snapshot_created(); + let mut paged = mine_solution(2).unwrap(); + paged.solution_pages.last_mut().map(|p| p.corrupt()); + + assert_eq!(verifier_events(), vec![]); + assert_eq!(::queued_score(), None); + + assert_eq!( + ::verify_synchronous_multi( + paged.solution_pages.clone().into_inner(), + MultiBlock::msp_range_for(2), + paged.score, + ) + .unwrap_err(), + FeasibilityError::NposElection(sp_npos_elections::Error::SolutionInvalidIndex) + ); + + assert_eq!( + verifier_events(), + vec![ + Event::::Verified(1, 2), + Event::::VerificationFailed( + 2, + FeasibilityError::NposElection( + sp_npos_elections::Error::SolutionInvalidIndex + ) + ), + ] + ); + assert_eq!(::queued_score(), None); + }) + } + + #[test] + fn too_may_max_backers_per_winner_second_page() { + ExtBuilder::verifier().build_and_execute(|| { + // A solution that where the at the second page with hit the final max backers per + // winner final bound. + roll_to_snapshot_created(); + let paged = mine_solution(2).unwrap(); + + hypothetically!({ + assert_ok!(::verify_synchronous_multi( + paged.solution_pages.clone().into_inner(), + MultiBlock::msp_range_for(2), + paged.score, + )); + let p1 = QueuedSolution::::get_queued_solution_page(1).unwrap(); + let p2 = QueuedSolution::::get_queued_solution_page(2).unwrap(); + + // 40 has 2 backers in the first page, and 3 in the second + assert_eq!( + p1.into_iter() + .find_map(|(who, support)| { + if who == 40 { + Some(support.voters.len()) + } else { + None + } + }) + .unwrap(), + 2 + ); + + assert_eq!( + p2.into_iter() + .find_map(|(who, support)| { + if who == 40 { + Some(support.voters.len()) + } else { + None + } + }) + .unwrap(), + 3 + ); + }); + + // From the above, we know setting this will do the trick + MaxBackersPerWinnerFinal::set(4); + + assert_eq!(verifier_events(), vec![]); + assert_eq!(::queued_score(), None); + + assert_eq!( + ::verify_synchronous_multi( + 
paged.solution_pages.clone().into_inner(), + MultiBlock::msp_range_for(2), + paged.score, + ) + .unwrap_err(), + FeasibilityError::FailedToBoundSupport + ); + + assert_eq!( + verifier_events(), + vec![ + Event::::Verified(1, 2), + Event::::VerificationFailed(2, FeasibilityError::FailedToBoundSupport), + ] + ); + assert_eq!(::queued_score(), None); + }) + } +} + +mod single_page_sync_verification { + use super::*; + + #[test] + fn basic_sync_verification_works() { + ExtBuilder::verifier().build_and_execute(|| { + roll_to_snapshot_created(); + let single_page = mine_solution(1).unwrap(); + + assert_eq!(verifier_events(), vec![]); + assert_eq!(::queued_score(), None); + + let _ = ::verify_synchronous( + single_page.solution_pages.first().cloned().unwrap(), + single_page.score, + MultiBlock::msp(), + ) + .unwrap(); + + assert_eq!( + verifier_events(), + vec![ + Event::::Verified(2, 2), + Event::::Queued(single_page.score, None) + ] + ); + assert_eq!(::queued_score(), Some(single_page.score)); + }) + } + + #[test] + fn winner_count_more() { + ExtBuilder::verifier().build_and_execute(|| { + roll_to_snapshot_created(); + let single_page = mine_solution(1).unwrap(); + + // change the snapshot, as if the desired targets is now 1. This solution is then valid, + // but has too many. + Snapshot::::set_desired_targets(1); + + assert_eq!(verifier_events(), vec![]); + assert_eq!(::queued_score(), None); + + // note: this is NOT a storage_noop! because we do emit events. 
+ assert_eq!( + ::verify_synchronous( + single_page.solution_pages.first().cloned().unwrap(), + single_page.score, + MultiBlock::msp(), + ) + .unwrap_err(), + FeasibilityError::WrongWinnerCount + ); + + assert_eq!( + verifier_events(), + vec![Event::::VerificationFailed(2, FeasibilityError::WrongWinnerCount)] + ); + assert_eq!(::queued_score(), None); + }) + } + + #[test] + fn winner_count_less() { + ExtBuilder::verifier().build_and_execute(|| { + roll_to_snapshot_created(); + let single_page = mine_solution(1).unwrap(); + + assert_eq!(verifier_events(), vec![]); + assert_eq!(::queued_score(), None); + + // Valid solution, but has now too few. + Snapshot::::set_desired_targets(3); + + assert_eq!( + ::verify_synchronous( + single_page.solution_pages.first().cloned().unwrap(), + single_page.score, + MultiBlock::msp(), + ) + .unwrap_err(), + FeasibilityError::WrongWinnerCount + ); + + assert_eq!( + verifier_events(), + vec![ + Event::Verified(2, 2), + Event::::VerificationFailed(2, FeasibilityError::WrongWinnerCount) + ] + ); + assert_eq!(::queued_score(), None); + }) + } + + #[test] + fn incorrect_score_is_rejected() { + ExtBuilder::verifier().build_and_execute(|| { + roll_to_snapshot_created(); + + let single_page = mine_solution(1).unwrap(); + let mut score_incorrect = single_page.score; + score_incorrect.minimal_stake += 1; + + assert_eq!( + ::verify_synchronous( + single_page.solution_pages.first().cloned().unwrap(), + score_incorrect, + MultiBlock::msp(), + ) + .unwrap_err(), + FeasibilityError::InvalidScore + ); + + assert_eq!( + verifier_events(), + vec![ + Event::Verified(2, 2), + Event::::VerificationFailed(2, FeasibilityError::InvalidScore), + ] + ); + }) + } + + #[test] + fn minimum_untrusted_score_is_rejected() { + ExtBuilder::verifier().build_and_execute(|| { + roll_to_snapshot_created(); + + let single_page = mine_solution(1).unwrap(); + + // raise the bar such that we don't meet it. 
+ let mut unattainable_score = single_page.score; + unattainable_score.minimal_stake += 1; + + ::set_minimum_score(unattainable_score); + + assert_eq!( + ::verify_synchronous( + single_page.solution_pages.first().cloned().unwrap(), + single_page.score, + MultiBlock::msp(), + ) + .unwrap_err(), + FeasibilityError::ScoreTooLow + ); + + assert_eq!( + verifier_events(), + vec![Event::::VerificationFailed(2, FeasibilityError::ScoreTooLow)] + ); + }) + } + + #[test] + fn bad_bounds_rejected_max_backers_per_winner() { + ExtBuilder::verifier().build_and_execute(|| { + roll_to_snapshot_created(); + + let single_page = mine_solution(1).unwrap(); + // note: change this after the miner is done, otherwise it is smart enough to trim. + MaxBackersPerWinner::set(1); + + assert_eq!( + ::verify_synchronous( + single_page.solution_pages.first().cloned().unwrap(), + single_page.score, + MultiBlock::msp(), + ) + .unwrap_err(), + FeasibilityError::FailedToBoundSupport + ); + + assert_eq!( + verifier_events(), + vec![Event::::VerificationFailed( + 2, + FeasibilityError::FailedToBoundSupport + )] + ); + }); + } + + #[test] + fn bad_bounds_rejected_max_winners_per_page() { + ExtBuilder::verifier().build_and_execute(|| { + roll_to_snapshot_created(); + + let single_page = mine_solution(1).unwrap(); + // note: the miner does feasibility internally, change this parameter afterwards. 
+ MaxWinnersPerPage::set(1); + + assert_eq!( + ::verify_synchronous( + single_page.solution_pages.first().cloned().unwrap(), + single_page.score, + MultiBlock::msp(), + ) + .unwrap_err(), + FeasibilityError::FailedToBoundSupport + ); + + assert_eq!( + verifier_events(), + vec![Event::::VerificationFailed( + 2, + FeasibilityError::FailedToBoundSupport + )] + ); + }); + } + + #[test] + fn bad_bounds_rejected_max_backers_per_winner_final() { + ExtBuilder::verifier().build_and_execute(|| { + roll_to_snapshot_created(); + + let single_page = mine_solution(1).unwrap(); + // note: the miner does feasibility internally, change this parameter afterwards. + MaxBackersPerWinnerFinal::set(1); + + assert_eq!( + ::verify_synchronous( + single_page.solution_pages.first().cloned().unwrap(), + single_page.score, + MultiBlock::msp(), + ) + .unwrap_err(), + FeasibilityError::FailedToBoundSupport + ); + + assert_eq!( + verifier_events(), + vec![Event::::VerificationFailed( + 2, + FeasibilityError::FailedToBoundSupport + )] + ); + }); + } + + #[test] + fn solution_improvement_threshold_respected() { + ExtBuilder::verifier() + .solution_improvement_threshold(Perbill::from_percent(10)) + .build_and_execute(|| { + roll_to_snapshot_created(); + + // submit something good. + let single_page = mine_solution(1).unwrap(); + let _ = ::verify_synchronous( + single_page.solution_pages.first().cloned().unwrap(), + single_page.score, + MultiBlock::msp(), + ) + .unwrap(); + + // the slightly better solution need not even be correct. We improve it by 5%, but + // we need 10%. 
+ let mut better_score = single_page.score; + let improvement = Perbill::from_percent(5) * better_score.minimal_stake; + better_score.minimal_stake += improvement; + let slightly_better = fake_solution(better_score); + + assert_eq!( + ::verify_synchronous( + slightly_better.solution_pages.first().cloned().unwrap(), + slightly_better.score, + MultiBlock::msp(), + ) + .unwrap_err(), + FeasibilityError::ScoreTooLow + ); + }); + } + + #[test] + fn weak_score_is_insta_rejected() { + ExtBuilder::verifier().build_and_execute(|| { + roll_to_snapshot_created(); + + // queue something useful. + let single_page = mine_solution(1).unwrap(); + let _ = ::verify_synchronous( + single_page.solution_pages.first().cloned().unwrap(), + single_page.score, + MultiBlock::msp(), + ) + .unwrap(); + assert_eq!(::queued_score(), Some(single_page.score)); + + // now try and submit that's really weak. Doesn't even need to be valid, since the score + // is checked first. + let mut bad_score = single_page.score; + bad_score.minimal_stake -= 1; + let weak = fake_solution(bad_score); + + assert_eq!( + ::verify_synchronous( + weak.solution_pages.first().cloned().unwrap(), + weak.score, + MultiBlock::msp(), + ) + .unwrap_err(), + FeasibilityError::ScoreTooLow + ); + + assert_eq!( + verifier_events(), + vec![ + Event::::Verified(2, 2), + Event::::Queued(single_page.score, None), + Event::::VerificationFailed(2, FeasibilityError::ScoreTooLow), + ] + ); + }) + } + + #[test] + fn good_solution_replaces() { + ExtBuilder::verifier().build_and_execute(|| { + roll_to_snapshot_created(); + + let weak_solution = solution_from_supports( + vec![ + (10, Support { total: 10, voters: vec![(1, 10)] }), + (20, Support { total: 10, voters: vec![(4, 10)] }), + ], + 2, + ); + + let weak_paged = PagedRawSolution:: { + solution_pages: bounded_vec![weak_solution], + score: ElectionScore { minimal_stake: 10, sum_stake: 20, sum_stake_squared: 200 }, + ..Default::default() + }; + + let _ = ::verify_synchronous( + 
weak_paged.solution_pages.first().cloned().unwrap(), + weak_paged.score, + MultiBlock::msp(), + ) + .unwrap(); + assert_eq!(::queued_score(), Some(weak_paged.score)); + + // now get a better solution. + let better = mine_solution(1).unwrap(); + + let _ = ::verify_synchronous( + better.solution_pages.first().cloned().unwrap(), + better.score, + MultiBlock::msp(), + ) + .unwrap(); + + assert_eq!(::queued_score(), Some(better.score)); + + assert_eq!( + verifier_events(), + vec![ + Event::::Verified(2, 2), + Event::::Queued(weak_paged.score, None), + Event::::Verified(2, 2), + Event::::Queued(better.score, Some(weak_paged.score)), + ] + ); + }) + } + + #[test] + fn weak_valid_is_discarded() { + ExtBuilder::verifier().build_and_execute(|| { + roll_to_snapshot_created(); + + // first, submit something good + let better = mine_solution(1).unwrap(); + let _ = ::verify_synchronous( + better.solution_pages.first().cloned().unwrap(), + better.score, + MultiBlock::msp(), + ) + .unwrap(); + assert_eq!(::queued_score(), Some(better.score)); + + // then try with something weaker. + let weak_solution = solution_from_supports( + vec![ + (10, Support { total: 10, voters: vec![(1, 10)] }), + (20, Support { total: 10, voters: vec![(4, 10)] }), + ], + 2, + ); + let weak_paged = PagedRawSolution:: { + solution_pages: bounded_vec![weak_solution], + score: ElectionScore { minimal_stake: 10, sum_stake: 20, sum_stake_squared: 200 }, + ..Default::default() + }; + + assert_eq!( + ::verify_synchronous( + weak_paged.solution_pages.first().cloned().unwrap(), + weak_paged.score, + MultiBlock::msp(), + ) + .unwrap_err(), + FeasibilityError::ScoreTooLow + ); + + // queued solution has not changed. 
+ assert_eq!(::queued_score(), Some(better.score)); + + assert_eq!( + verifier_events(), + vec![ + Event::::Verified(2, 2), + Event::::Queued(better.score, None), + Event::::VerificationFailed(2, FeasibilityError::ScoreTooLow), + ] + ); + }) + } +} diff --git a/substrate/frame/election-provider-multi-block/src/weights/comp_weights.sh b/substrate/frame/election-provider-multi-block/src/weights/comp_weights.sh new file mode 100755 index 0000000000000..7f0832b326a7e --- /dev/null +++ b/substrate/frame/election-provider-multi-block/src/weights/comp_weights.sh @@ -0,0 +1,28 @@ + +function display { + subweight compare files \ + --method asymptotic \ + --new $1 \ + --old $2 \ + --unit proof --verbose --threshold 0 + + # subweight compare files \ + # --method asymptotic \ + # --new $1 \ + # --old $2 \ + # --unit time --verbose --threshold 0 +} + +## Polkadot + +echo "#### new: polkadot/pallet_election_provider_multi_block.rs old: kusama" +display "polkadot/measured/pallet_election_provider_multi_block.rs" "kusama/measured/pallet_election_provider_multi_block.rs" + +echo "#### new: polkadot/pallet_election_provider_multi_block_signed.rs old: kusama" +display "polkadot/measured/pallet_election_provider_multi_block_signed.rs" "kusama/measured/pallet_election_provider_multi_block_signed.rs" + +echo "#### new: polkadot/pallet_election_provider_multi_block_unsigned.rs old: kusama" +display "polkadot/measured/pallet_election_provider_multi_block_unsigned.rs" "kusama/measured/pallet_election_provider_multi_block_unsigned.rs" + +echo "#### new: polkadot/pallet_election_provider_multi_block_verifier.rs old: kusama" +display "polkadot/measured/pallet_election_provider_multi_block_verifier.rs" "kusama/measured/pallet_election_provider_multi_block_verifier.rs" diff --git a/substrate/frame/election-provider-multi-block/src/weights/display_weights.sh b/substrate/frame/election-provider-multi-block/src/weights/display_weights.sh new file mode 100755 index 0000000000000..a11478e332168 --- 
/dev/null +++ b/substrate/frame/election-provider-multi-block/src/weights/display_weights.sh @@ -0,0 +1,46 @@ + +function display { + subweight compare files \ + --method asymptotic \ + --new $1 \ + --old $1 \ + --unit proof \ + --verbose \ + --threshold 0 + + # subweight compare files \ + # --method asymptotic \ + # --new $1 \ + # --old $1 \ + # --unit time \ + # --verbose \ + # --threshold 0 +} + +## Polkadot + +echo "#### polkadot/pallet_election_provider_multi_block.rs" +display "polkadot/measured/pallet_election_provider_multi_block.rs" + +echo "#### polkadot/pallet_election_provider_multi_block_signed.rs" +display "polkadot/measured/pallet_election_provider_multi_block_signed.rs" + +echo "#### polkadot/pallet_election_provider_multi_block_unsigned.rs" +display "polkadot/measured/pallet_election_provider_multi_block_unsigned.rs" + +echo "#### polkadot/pallet_election_provider_multi_block_verifier.rs" +display "polkadot/measured/pallet_election_provider_multi_block_verifier.rs" + +## Kusama + +echo "#### kusama/pallet_election_provider_multi_block.rs" +display "kusama/measured/pallet_election_provider_multi_block.rs" + +echo "#### kusama/pallet_election_provider_multi_block_signed.rs" +display "kusama/measured/pallet_election_provider_multi_block_signed.rs" + +echo "#### kusama/pallet_election_provider_multi_block_unsigned.rs" +display "kusama/measured/pallet_election_provider_multi_block_unsigned.rs" + +echo "#### kusama/pallet_election_provider_multi_block_verifier.rs" +display "kusama/measured/pallet_election_provider_multi_block_verifier.rs" diff --git a/substrate/frame/election-provider-multi-block/src/weights/kusama/measured/pallet_election_provider_multi_block.rs b/substrate/frame/election-provider-multi-block/src/weights/kusama/measured/pallet_election_provider_multi_block.rs new file mode 100644 index 0000000000000..0341382701f6e --- /dev/null +++ 
b/substrate/frame/election-provider-multi-block/src/weights/kusama/measured/pallet_election_provider_multi_block.rs @@ -0,0 +1,622 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + + +//! Autogenerated weights for `pallet_election_provider_multi_block` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-04-17, STEPS: `5`, REPEAT: `10`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `ggwpez-ref-hw`, CPU: `AMD EPYC 7232P 8-Core Processor` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: `1024` + +// Executed Command: +// ../../../../../target/release/frame-omni-bencher +// v1 +// benchmark +// pallet +// --pallet +// pallet_election_provider_multi_block +// --extrinsic +// all +// --runtime +// ../../../../../target/release/wbuild/pallet-staking-async-parachain-runtime/pallet_staking_async_parachain_runtime.compact.wasm +// --steps +// 5 +// --repeat +// 10 +// --genesis-builder-preset +// ksm_size +// --template +// ../../../../../substrate/.maintain/frame-weight-template.hbs +// --heap-pages +// 65000 +// --output +// ./pallet_election_provider_multi_block_ksm_size.rs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] +#![allow(dead_code)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `pallet_election_provider_multi_block`. +pub trait WeightInfo { + fn on_initialize_nothing() -> Weight; + fn on_initialize_into_snapshot_msp() -> Weight; + fn on_initialize_into_snapshot_rest() -> Weight; + fn on_initialize_into_signed() -> Weight; + fn on_initialize_into_signed_validation() -> Weight; + fn on_initialize_into_unsigned() -> Weight; + fn export_non_terminal() -> Weight; + fn export_terminal() -> Weight; + fn manage() -> Weight; +} + +/// Weights for `pallet_election_provider_multi_block` using the Substrate node and recommended hardware. 
+pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + fn on_initialize_nothing() -> Weight { + // Proof Size summary in bytes: + // Measured: `147` + // Estimated: `3612` + // Minimum execution time: 24_740_000 picoseconds. 
+ Weight::from_parts(25_460_000, 3612) + .saturating_add(T::DbWeight::get().reads(5_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `Staking::ValidatorCount` (r:1 w:0) + /// Proof: `Staking::ValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `Staking::CounterForValidators` (r:1 w:0) + /// Proof: `Staking::CounterForValidators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `Staking::Validators` (r:1001 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Storage: `MultiBlock::DesiredTargets` (r:0 w:1) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlock::PagedTargetSnapshotHash` (r:0 w:1) + /// Proof: `MultiBlock::PagedTargetSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`) + /// Storage: `MultiBlock::PagedTargetSnapshot` (r:0 w:1) + /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) + fn 
on_initialize_into_snapshot_msp() -> Weight { + // Proof Size summary in bytes: + // Measured: `49026` + // Estimated: `2527491` + // Minimum execution time: 9_186_586_000 picoseconds. + Weight::from_parts(9_221_526_000, 2527491) + .saturating_add(T::DbWeight::get().reads(1008_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Storage: `Staking::VoterSnapshotStatus` (r:1 w:1) + /// Proof: `Staking::VoterSnapshotStatus` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `Measured`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:0) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `VoterList::ListBags` (r:1 w:0) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `Measured`) + /// Storage: `VoterList::ListNodes` (r:783 w:0) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `Measured`) + /// Storage: `Staking::Bonded` (r:781 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `Measured`) + /// Storage: `Staking::Ledger` (r:781 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `Measured`) + /// Storage: `Staking::Nominators` (r:781 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `Measured`) + /// Storage: `Staking::Validators` (r:395 w:0) + /// Proof: 
`Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Storage: `MultiBlock::PagedVoterSnapshot` (r:0 w:1) + /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(431907), added: 434382, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshotHash` (r:0 w:1) + /// Proof: `MultiBlock::PagedVoterSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`) + /// Storage: `Staking::MinimumActiveStake` (r:0 w:1) + /// Proof: `Staking::MinimumActiveStake` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `Measured`) + /// Storage: `VoterList::Lock` (r:0 w:1) + /// Proof: `VoterList::Lock` (`max_values`: Some(1), `max_size`: Some(0), added: 495, mode: `Measured`) + fn on_initialize_into_snapshot_rest() -> Weight { + // Proof Size summary in bytes: + // Measured: `1460558` + // Estimated: `3399473` + // Minimum execution time: 49_612_007_000 picoseconds. 
+ Weight::from_parts(49_882_360_000, 3399473) + .saturating_add(T::DbWeight::get().reads(3530_u64)) + .saturating_add(T::DbWeight::get().writes(6_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Storage: `Staking::VoterSnapshotStatus` (r:1 w:1) + /// Proof: `Staking::VoterSnapshotStatus` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `Measured`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:0) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `VoterList::ListNodes` (r:783 w:0) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `Measured`) + /// Storage: `Staking::Bonded` (r:781 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `Measured`) + /// Storage: `Staking::Ledger` (r:781 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `Measured`) + /// Storage: `Staking::Nominators` (r:781 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x6358acd2035ec4bb863fa981e0c177b9` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x6358acd2035ec4bb863fa981e0c177b9` (r:1 w:0) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + 
/// Proof: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Storage: `MultiBlock::PagedVoterSnapshot` (r:0 w:1) + /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(431907), added: 434382, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshotHash` (r:0 w:1) + /// Proof: `MultiBlock::PagedVoterSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`) + /// Storage: `Staking::MinimumActiveStake` (r:0 w:1) + /// Proof: `Staking::MinimumActiveStake` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `Measured`) + /// Storage: `VoterList::Lock` (r:0 w:1) + /// Proof: `VoterList::Lock` (`max_values`: Some(1), `max_size`: Some(0), added: 495, mode: `Measured`) + fn on_initialize_into_signed() -> Weight { + // Proof Size summary in bytes: + // Measured: `1585637` + // Estimated: `3524552` + // Minimum execution time: 50_523_714_000 picoseconds. 
+ Weight::from_parts(50_697_786_000, 3524552) + .saturating_add(T::DbWeight::get().reads(3135_u64)) + .saturating_add(T::DbWeight::get().writes(6_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:0) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + fn on_initialize_into_signed_validation() -> Weight { + // Proof Size summary in bytes: + // Measured: `335` + // Estimated: `3800` + // Minimum execution time: 3_302_514_000 picoseconds. 
+ Weight::from_parts(3_390_244_000, 3800) + .saturating_add(T::DbWeight::get().reads(7_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + fn on_initialize_into_unsigned() -> Weight { + // Proof Size summary in bytes: + // Measured: `331` + // Estimated: `3796` + // Minimum execution time: 3_348_884_000 picoseconds. 
+ Weight::from_parts(3_394_384_000, 3796) + .saturating_add(T::DbWeight::get().reads(6_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(37538014), added: 37540489, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `Staking::ElectableStashes` (r:1 w:1) + /// Proof: `Staking::ElectableStashes` (`max_values`: Some(1), `max_size`: Some(32002), added: 32497, mode: `Measured`) + /// Storage: `Staking::ErasStakersOverview` (r:829 w:829) + /// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `Measured`) + /// Storage: `Staking::ErasTotalStake` (r:1 w:1) + /// Proof: `Staking::ErasTotalStake` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `Measured`) + /// Storage: `Staking::Validators` (r:829 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `Measured`) + /// Storage: `Staking::ErasValidatorPrefs` (r:0 w:829) + /// Proof: `Staking::ErasValidatorPrefs` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: 
`Measured`) + /// Storage: `Staking::ErasStakersPaged` (r:0 w:806) + /// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn export_non_terminal() -> Weight { + // Proof Size summary in bytes: + // Measured: `168548` + // Estimated: `2221313` + // Minimum execution time: 23_179_673_000 picoseconds. + Weight::from_parts(23_408_295_000, 2221313) + .saturating_add(T::DbWeight::get().reads(1666_u64)) + .saturating_add(T::DbWeight::get().writes(2467_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:1) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:16 w:16) + /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(37538014), added: 37540489, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `MultiBlock::Round` (r:1 w:1) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshot` (r:16 w:16) + /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(431907), added: 434382, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshotHash` (r:16 w:16) + /// Proof: `MultiBlock::PagedVoterSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`) + /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:1) + /// Proof: `MultiBlock::PagedTargetSnapshot` 
(`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) + /// Storage: `MultiBlock::PagedTargetSnapshotHash` (r:1 w:1) + /// Proof: `MultiBlock::PagedTargetSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `Staking::ElectableStashes` (r:1 w:1) + /// Proof: `Staking::ElectableStashes` (`max_values`: Some(1), `max_size`: Some(32002), added: 32497, mode: `Measured`) + /// Storage: `Staking::ErasStakersOverview` (r:955 w:955) + /// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `Measured`) + /// Storage: `Staking::ErasStakersPaged` (r:955 w:956) + /// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ErasTotalStake` (r:1 w:1) + /// Proof: `Staking::ErasTotalStake` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `Measured`) + /// Storage: `Staking::Validators` (r:955 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `Measured`) + /// Storage: `MultiBlock::DesiredTargets` (r:0 w:1) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `Staking::ErasValidatorPrefs` (r:0 w:955) + /// Proof: `Staking::ErasValidatorPrefs` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:0 w:1) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:0 w:1) + /// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`) + fn 
export_terminal() -> Weight { + // Proof Size summary in bytes: + // Measured: `1918258` + // Estimated: `4282873` + // Minimum execution time: 50_591_761_000 picoseconds. + Weight::from_parts(50_753_934_000, 4282873) + .saturating_add(T::DbWeight::get().reads(2923_u64)) + .saturating_add(T::DbWeight::get().writes(2924_u64)) + } + fn manage() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 120_000 picoseconds. + Weight::from_parts(140_000, 0) + } +} + +// For backwards compatibility and tests. +impl WeightInfo for () { + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + fn on_initialize_nothing() -> Weight { + // Proof Size summary in bytes: + // Measured: `147` + // Estimated: `3612` + // Minimum execution time: 24_740_000 picoseconds. 
+ Weight::from_parts(25_460_000, 3612) + .saturating_add(RocksDbWeight::get().reads(5_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `Staking::ValidatorCount` (r:1 w:0) + /// Proof: `Staking::ValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `Staking::CounterForValidators` (r:1 w:0) + /// Proof: `Staking::CounterForValidators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `Staking::Validators` (r:1001 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Storage: `MultiBlock::DesiredTargets` (r:0 w:1) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlock::PagedTargetSnapshotHash` (r:0 w:1) + /// Proof: `MultiBlock::PagedTargetSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`) + /// Storage: `MultiBlock::PagedTargetSnapshot` (r:0 w:1) + /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) + fn 
on_initialize_into_snapshot_msp() -> Weight { + // Proof Size summary in bytes: + // Measured: `49026` + // Estimated: `2527491` + // Minimum execution time: 9_186_586_000 picoseconds. + Weight::from_parts(9_221_526_000, 2527491) + .saturating_add(RocksDbWeight::get().reads(1008_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Storage: `Staking::VoterSnapshotStatus` (r:1 w:1) + /// Proof: `Staking::VoterSnapshotStatus` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `Measured`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:0) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `VoterList::ListBags` (r:1 w:0) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `Measured`) + /// Storage: `VoterList::ListNodes` (r:783 w:0) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `Measured`) + /// Storage: `Staking::Bonded` (r:781 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `Measured`) + /// Storage: `Staking::Ledger` (r:781 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `Measured`) + /// Storage: `Staking::Nominators` (r:781 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `Measured`) + /// Storage: `Staking::Validators` (r:395 w:0) + /// Proof: 
`Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Storage: `MultiBlock::PagedVoterSnapshot` (r:0 w:1) + /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(431907), added: 434382, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshotHash` (r:0 w:1) + /// Proof: `MultiBlock::PagedVoterSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`) + /// Storage: `Staking::MinimumActiveStake` (r:0 w:1) + /// Proof: `Staking::MinimumActiveStake` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `Measured`) + /// Storage: `VoterList::Lock` (r:0 w:1) + /// Proof: `VoterList::Lock` (`max_values`: Some(1), `max_size`: Some(0), added: 495, mode: `Measured`) + fn on_initialize_into_snapshot_rest() -> Weight { + // Proof Size summary in bytes: + // Measured: `1460558` + // Estimated: `3399473` + // Minimum execution time: 49_612_007_000 picoseconds. 
+ Weight::from_parts(49_882_360_000, 3399473) + .saturating_add(RocksDbWeight::get().reads(3530_u64)) + .saturating_add(RocksDbWeight::get().writes(6_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Storage: `Staking::VoterSnapshotStatus` (r:1 w:1) + /// Proof: `Staking::VoterSnapshotStatus` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `Measured`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:0) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `VoterList::ListNodes` (r:783 w:0) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `Measured`) + /// Storage: `Staking::Bonded` (r:781 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `Measured`) + /// Storage: `Staking::Ledger` (r:781 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `Measured`) + /// Storage: `Staking::Nominators` (r:781 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x6358acd2035ec4bb863fa981e0c177b9` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x6358acd2035ec4bb863fa981e0c177b9` (r:1 w:0) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) 
+ /// Proof: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Storage: `MultiBlock::PagedVoterSnapshot` (r:0 w:1) + /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(431907), added: 434382, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshotHash` (r:0 w:1) + /// Proof: `MultiBlock::PagedVoterSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`) + /// Storage: `Staking::MinimumActiveStake` (r:0 w:1) + /// Proof: `Staking::MinimumActiveStake` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `Measured`) + /// Storage: `VoterList::Lock` (r:0 w:1) + /// Proof: `VoterList::Lock` (`max_values`: Some(1), `max_size`: Some(0), added: 495, mode: `Measured`) + fn on_initialize_into_signed() -> Weight { + // Proof Size summary in bytes: + // Measured: `1585637` + // Estimated: `3524552` + // Minimum execution time: 50_523_714_000 picoseconds. 
+ Weight::from_parts(50_697_786_000, 3524552) + .saturating_add(RocksDbWeight::get().reads(3135_u64)) + .saturating_add(RocksDbWeight::get().writes(6_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:0) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + fn on_initialize_into_signed_validation() -> Weight { + // Proof Size summary in bytes: + // Measured: `335` + // Estimated: `3800` + // Minimum execution time: 3_302_514_000 picoseconds. 
+ Weight::from_parts(3_390_244_000, 3800) + .saturating_add(RocksDbWeight::get().reads(7_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + fn on_initialize_into_unsigned() -> Weight { + // Proof Size summary in bytes: + // Measured: `331` + // Estimated: `3796` + // Minimum execution time: 3_348_884_000 picoseconds. 
+ Weight::from_parts(3_394_384_000, 3796) + .saturating_add(RocksDbWeight::get().reads(6_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(37538014), added: 37540489, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `Staking::ElectableStashes` (r:1 w:1) + /// Proof: `Staking::ElectableStashes` (`max_values`: Some(1), `max_size`: Some(32002), added: 32497, mode: `Measured`) + /// Storage: `Staking::ErasStakersOverview` (r:829 w:829) + /// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `Measured`) + /// Storage: `Staking::ErasTotalStake` (r:1 w:1) + /// Proof: `Staking::ErasTotalStake` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `Measured`) + /// Storage: `Staking::Validators` (r:829 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `Measured`) + /// Storage: `Staking::ErasValidatorPrefs` (r:0 w:829) + /// Proof: `Staking::ErasValidatorPrefs` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: 
`Measured`) + /// Storage: `Staking::ErasStakersPaged` (r:0 w:806) + /// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn export_non_terminal() -> Weight { + // Proof Size summary in bytes: + // Measured: `168548` + // Estimated: `2221313` + // Minimum execution time: 23_179_673_000 picoseconds. + Weight::from_parts(23_408_295_000, 2221313) + .saturating_add(RocksDbWeight::get().reads(1666_u64)) + .saturating_add(RocksDbWeight::get().writes(2467_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:1) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:16 w:16) + /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(37538014), added: 37540489, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `MultiBlock::Round` (r:1 w:1) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshot` (r:16 w:16) + /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(431907), added: 434382, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshotHash` (r:16 w:16) + /// Proof: `MultiBlock::PagedVoterSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`) + /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:1) + /// Proof: `MultiBlock::PagedTargetSnapshot` 
(`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) + /// Storage: `MultiBlock::PagedTargetSnapshotHash` (r:1 w:1) + /// Proof: `MultiBlock::PagedTargetSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `Staking::ElectableStashes` (r:1 w:1) + /// Proof: `Staking::ElectableStashes` (`max_values`: Some(1), `max_size`: Some(32002), added: 32497, mode: `Measured`) + /// Storage: `Staking::ErasStakersOverview` (r:955 w:955) + /// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `Measured`) + /// Storage: `Staking::ErasStakersPaged` (r:955 w:956) + /// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ErasTotalStake` (r:1 w:1) + /// Proof: `Staking::ErasTotalStake` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `Measured`) + /// Storage: `Staking::Validators` (r:955 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `Measured`) + /// Storage: `MultiBlock::DesiredTargets` (r:0 w:1) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `Staking::ErasValidatorPrefs` (r:0 w:955) + /// Proof: `Staking::ErasValidatorPrefs` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:0 w:1) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:0 w:1) + /// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`) + fn 
export_terminal() -> Weight { + // Proof Size summary in bytes: + // Measured: `1918258` + // Estimated: `4282873` + // Minimum execution time: 50_591_761_000 picoseconds. + Weight::from_parts(50_753_934_000, 4282873) + .saturating_add(RocksDbWeight::get().reads(2923_u64)) + .saturating_add(RocksDbWeight::get().writes(2924_u64)) + } + fn manage() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 120_000 picoseconds. + Weight::from_parts(140_000, 0) + } +} diff --git a/substrate/frame/election-provider-multi-block/src/weights/kusama/measured/pallet_election_provider_multi_block_signed.rs b/substrate/frame/election-provider-multi-block/src/weights/kusama/measured/pallet_election_provider_multi_block_signed.rs new file mode 100644 index 0000000000000..bee8030f3b7a1 --- /dev/null +++ b/substrate/frame/election-provider-multi-block/src/weights/kusama/measured/pallet_election_provider_multi_block_signed.rs @@ -0,0 +1,357 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + + +//! Autogenerated weights for `pallet_election_provider_multi_block_signed` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-04-17, STEPS: `5`, REPEAT: `10`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! 
HOSTNAME: `ggwpez-ref-hw`, CPU: `AMD EPYC 7232P 8-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: `1024` + +// Executed Command: +// ../../../../../target/release/frame-omni-bencher +// v1 +// benchmark +// pallet +// --pallet +// pallet_election_provider_multi_block_signed +// --extrinsic +// all +// --runtime +// ../../../../../target/release/wbuild/pallet-staking-async-parachain-runtime/pallet_staking_async_parachain_runtime.compact.wasm +// --steps +// 5 +// --repeat +// 10 +// --genesis-builder-preset +// ksm_size +// --template +// ../../../../../substrate/.maintain/frame-weight-template.hbs +// --heap-pages +// 65000 +// --output +// ./pallet_election_provider_multi_block_signed_ksm_size.rs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] +#![allow(dead_code)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `pallet_election_provider_multi_block_signed`. +pub trait WeightInfo { + fn register_not_full() -> Weight; + fn register_eject() -> Weight; + fn submit_page() -> Weight; + fn unset_page() -> Weight; + fn bail() -> Weight; + fn clear_old_round_data(p: u32, ) -> Weight; +} + +/// Weights for `pallet_election_provider_multi_block_signed` using the Substrate node and recommended hardware. 
+pub struct SubstrateWeight<T>(PhantomData<T>); +impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> { + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:0 w:1) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(165), added: 2640, mode: `Measured`) + fn register_not_full() -> Weight { + // Proof Size summary in bytes: + // Measured: `3140` + // Estimated: `6605` + // Minimum execution time: 138_561_000 picoseconds. 
+ Weight::from_parts(140_321_000, 6605) + .saturating_add(T::DbWeight::get().reads(5_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `Balances::Holds` (r:2 w:2) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:2) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(165), added: 2640, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:16 w:16) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(50064), added: 52539, mode: `Measured`) + fn register_eject() -> Weight { + // Proof Size summary in bytes: + // Measured: `7072` + // Estimated: `47662` + // Minimum execution time: 303_332_000 picoseconds. 
+ Weight::from_parts(308_642_000, 47662) + .saturating_add(T::DbWeight::get().reads(23_u64)) + .saturating_add(T::DbWeight::get().writes(21_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(165), added: 2640, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:0) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(50064), added: 52539, mode: `Measured`) + fn submit_page() -> Weight { + // Proof Size summary in bytes: + // Measured: `3647` + // Estimated: `7112` + // Minimum execution time: 3_482_987_000 picoseconds. 
+ Weight::from_parts(3_499_827_000, 7112) + .saturating_add(T::DbWeight::get().reads(7_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(165), added: 2640, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:0) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(50064), added: 52539, mode: `Measured`) + fn unset_page() -> Weight { + // Proof Size summary in bytes: + // Measured: `17796` + // Estimated: `21261` + // Minimum execution time: 3_368_565_000 picoseconds. 
+ Weight::from_parts(4_187_231_000, 21261) + .saturating_add(T::DbWeight::get().reads(7_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:16 w:16) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(50064), added: 52539, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(165), added: 2640, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + fn bail() -> Weight { + // Proof Size summary in bytes: + // Measured: `4070` + // Estimated: `44660` + // Minimum execution time: 197_152_000 picoseconds. 
+ Weight::from_parts(198_881_000, 44660) + .saturating_add(T::DbWeight::get().reads(22_u64)) + .saturating_add(T::DbWeight::get().writes(19_u64)) + } + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:16 w:16) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(50064), added: 52539, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(165), added: 2640, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + /// The range of component `p` is `[1, 16]`. + fn clear_old_round_data(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `3582 + p * (31 ±0)` + // Estimated: `7049 + p * (2507 ±0)` + // Minimum execution time: 139_501_000 picoseconds. + Weight::from_parts(140_350_523, 7049) + // Standard Error: 24_519 + .saturating_add(Weight::from_parts(1_655_732, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(5_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) + .saturating_add(T::DbWeight::get().writes(3_u64)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) + .saturating_add(Weight::from_parts(0, 2507).saturating_mul(p.into())) + } +} + +// For backwards compatibility and tests. 
+impl WeightInfo for () { + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:0 w:1) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(165), added: 2640, mode: `Measured`) + fn register_not_full() -> Weight { + // Proof Size summary in bytes: + // Measured: `3140` + // Estimated: `6605` + // Minimum execution time: 138_561_000 picoseconds. 
+ Weight::from_parts(140_321_000, 6605) + .saturating_add(RocksDbWeight::get().reads(5_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `Balances::Holds` (r:2 w:2) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:2) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(165), added: 2640, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:16 w:16) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(50064), added: 52539, mode: `Measured`) + fn register_eject() -> Weight { + // Proof Size summary in bytes: + // Measured: `7072` + // Estimated: `47662` + // Minimum execution time: 303_332_000 picoseconds. 
+ Weight::from_parts(308_642_000, 47662) + .saturating_add(RocksDbWeight::get().reads(23_u64)) + .saturating_add(RocksDbWeight::get().writes(21_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(165), added: 2640, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:0) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(50064), added: 52539, mode: `Measured`) + fn submit_page() -> Weight { + // Proof Size summary in bytes: + // Measured: `3647` + // Estimated: `7112` + // Minimum execution time: 3_482_987_000 picoseconds. 
+ Weight::from_parts(3_499_827_000, 7112) + .saturating_add(RocksDbWeight::get().reads(7_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(165), added: 2640, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:0) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(50064), added: 52539, mode: `Measured`) + fn unset_page() -> Weight { + // Proof Size summary in bytes: + // Measured: `17796` + // Estimated: `21261` + // Minimum execution time: 3_368_565_000 picoseconds. 
+ Weight::from_parts(4_187_231_000, 21261) + .saturating_add(RocksDbWeight::get().reads(7_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:16 w:16) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(50064), added: 52539, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(165), added: 2640, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + fn bail() -> Weight { + // Proof Size summary in bytes: + // Measured: `4070` + // Estimated: `44660` + // Minimum execution time: 197_152_000 picoseconds. 
+ Weight::from_parts(198_881_000, 44660) + .saturating_add(RocksDbWeight::get().reads(22_u64)) + .saturating_add(RocksDbWeight::get().writes(19_u64)) + } + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:16 w:16) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(50064), added: 52539, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(165), added: 2640, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + /// The range of component `p` is `[1, 16]`. + fn clear_old_round_data(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `3582 + p * (31 ±0)` + // Estimated: `7049 + p * (2507 ±0)` + // Minimum execution time: 139_501_000 picoseconds. 
+ Weight::from_parts(140_350_523, 7049) + // Standard Error: 24_519 + .saturating_add(Weight::from_parts(1_655_732, 0).saturating_mul(p.into())) + .saturating_add(RocksDbWeight::get().reads(5_u64)) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(p.into()))) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(p.into()))) + .saturating_add(Weight::from_parts(0, 2507).saturating_mul(p.into())) + } +} diff --git a/substrate/frame/election-provider-multi-block/src/weights/kusama/measured/pallet_election_provider_multi_block_unsigned.rs b/substrate/frame/election-provider-multi-block/src/weights/kusama/measured/pallet_election_provider_multi_block_unsigned.rs new file mode 100644 index 0000000000000..743d0ba2b3da1 --- /dev/null +++ b/substrate/frame/election-provider-multi-block/src/weights/kusama/measured/pallet_election_provider_multi_block_unsigned.rs @@ -0,0 +1,154 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + + +//! Autogenerated weights for `pallet_election_provider_multi_block::unsigned` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-25, STEPS: `2`, REPEAT: `3`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! 
HOSTNAME: `ggwpez-ref-hw`, CPU: `AMD EPYC 7232P 8-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` + +// Executed Command: +// target/release/substrate-node +// benchmark +// pallet +// --chain +// dev +// --pallet +// pallet_election_provider_multi_block::unsigned +// --extrinsic +// all +// --steps +// 2 +// --repeat +// 3 +// --template +// substrate/.maintain/frame-weight-template.hbs +// --heap-pages +// 65000 +// --default-pov-mode +// measured +// --output +// ../measured + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] +#![allow(dead_code)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `pallet_election_provider_multi_block::unsigned`. +pub trait WeightInfo { + fn validate_unsigned() -> Weight; + fn submit_unsigned() -> Weight; +} + +/// Weights for `pallet_election_provider_multi_block::unsigned` using the Substrate node and recommended hardware. 
+pub struct SubstrateWeight<T>(PhantomData<T>); +impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> { + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`) + /// Storage: `MultiBlockVerifier::MinimumScore` (r:1 w:0) + /// Proof: `MultiBlockVerifier::MinimumScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`) + /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + fn validate_unsigned() -> Weight { + // Proof Size summary in bytes: + // Measured: `364` + // Estimated: `1849` + // Minimum execution time: 103_121_000 picoseconds. 
+ Weight::from_parts(106_290_000, 1849) + .saturating_add(T::DbWeight::get().reads(5_u64)) + } + /// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:1 w:1) + /// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`) + /// Storage: `MultiBlockVerifier::MinimumScore` (r:1 w:0) + /// Proof: `MultiBlockVerifier::MinimumScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`) + /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(194117), added: 196592, mode: `Measured`) + /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionY` (r:0 w:1) + /// Proof: `MultiBlockVerifier::QueuedSolutionY` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `Measured`) + fn submit_unsigned() -> Weight { + // Proof Size summary in bytes: + // Measured: `61621` + // Estimated: `65086` + // Minimum execution time: 4_720_814_000 picoseconds. + Weight::from_parts(5_716_078_000, 65086) + .saturating_add(T::DbWeight::get().reads(6_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } +} + +// For backwards compatibility and tests. 
+impl WeightInfo for () { + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`) + /// Storage: `MultiBlockVerifier::MinimumScore` (r:1 w:0) + /// Proof: `MultiBlockVerifier::MinimumScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`) + /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + fn validate_unsigned() -> Weight { + // Proof Size summary in bytes: + // Measured: `364` + // Estimated: `1849` + // Minimum execution time: 103_121_000 picoseconds. 
+ Weight::from_parts(106_290_000, 1849) + .saturating_add(RocksDbWeight::get().reads(5_u64)) + } + /// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:1 w:1) + /// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`) + /// Storage: `MultiBlockVerifier::MinimumScore` (r:1 w:0) + /// Proof: `MultiBlockVerifier::MinimumScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`) + /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(194117), added: 196592, mode: `Measured`) + /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionY` (r:0 w:1) + /// Proof: `MultiBlockVerifier::QueuedSolutionY` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `Measured`) + fn submit_unsigned() -> Weight { + // Proof Size summary in bytes: + // Measured: `61621` + // Estimated: `65086` + // Minimum execution time: 4_720_814_000 picoseconds. 
+ Weight::from_parts(5_716_078_000, 65086) + .saturating_add(RocksDbWeight::get().reads(6_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } +} diff --git a/substrate/frame/election-provider-multi-block/src/weights/kusama/measured/pallet_election_provider_multi_block_verifier.rs b/substrate/frame/election-provider-multi-block/src/weights/kusama/measured/pallet_election_provider_multi_block_verifier.rs new file mode 100644 index 0000000000000..b910646307ada --- /dev/null +++ b/substrate/frame/election-provider-multi-block/src/weights/kusama/measured/pallet_election_provider_multi_block_verifier.rs @@ -0,0 +1,423 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + + +//! Autogenerated weights for `pallet_election_provider_multi_block_verifier` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-04-17, STEPS: `5`, REPEAT: `10`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `ggwpez-ref-hw`, CPU: `AMD EPYC 7232P 8-Core Processor` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: `1024` + +// Executed Command: +// ../../../../../target/release/frame-omni-bencher +// v1 +// benchmark +// pallet +// --pallet +// pallet_election_provider_multi_block_verifier +// --extrinsic +// all +// --runtime +// ../../../../../target/release/wbuild/pallet-staking-async-parachain-runtime/pallet_staking_async_parachain_runtime.compact.wasm +// --steps +// 5 +// --repeat +// 10 +// --genesis-builder-preset +// ksm_size +// --template +// ../../../../../substrate/.maintain/frame-weight-template.hbs +// --heap-pages +// 65000 +// --output +// ./pallet_election_provider_multi_block_verifier_ksm_size.rs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] +#![allow(dead_code)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `pallet_election_provider_multi_block_verifier`. +pub trait WeightInfo { + fn on_initialize_valid_non_terminal() -> Weight; + fn on_initialize_valid_terminal() -> Weight; + fn on_initialize_invalid_terminal() -> Weight; + fn on_initialize_invalid_non_terminal(v: u32, ) -> Weight; +} + +/// Weights for `pallet_election_provider_multi_block_verifier` using the Substrate node and recommended hardware. 
+pub struct SubstrateWeight<T>(PhantomData<T>); +impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> { + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:0) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:1 w:0) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(50064), added: 52539, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(431907), added: 434382, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + /// Storage: 
UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:0 w:1) + /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(37538014), added: 37540489, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:0 w:1) + /// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `Measured`) + fn on_initialize_valid_non_terminal() -> Weight { + // Proof Size summary in bytes: + // Measured: `283185` + // Estimated: `286650` + // Minimum execution time: 6_624_600_000 picoseconds. + Weight::from_parts(8_167_211_000, 286650) + .saturating_add(T::DbWeight::get().reads(13_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:16 w:16) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(50064), added: 52539, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN 
KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(431907), added: 434382, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:1) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:17 w:16) + /// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:1 w:1) + /// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(165), added: 2640, mode: `Measured`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Storage: 
`MultiBlockVerifier::QueuedSolutionX` (r:0 w:1) + /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(37538014), added: 37540489, mode: `Measured`) + fn on_initialize_valid_terminal() -> Weight { + // Proof Size summary in bytes: + // Measured: `1196794` + // Estimated: `1239859` + // Minimum execution time: 31_557_875_000 picoseconds. + Weight::from_parts(31_820_575_000, 1239859) + .saturating_add(T::DbWeight::get().reads(48_u64)) + .saturating_add(T::DbWeight::get().writes(40_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:16 w:16) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(50064), added: 52539, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(431907), added: 434382, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// 
Proof: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:17 w:16) + /// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(165), added: 2640, mode: `Measured`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:15 w:16) + /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(37538014), added: 37540489, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + fn on_initialize_invalid_terminal() -> Weight { + // Proof Size summary in bytes: + // Measured: `1197143` + // Estimated: `1240208` + // Minimum execution time: 31_544_642_000 picoseconds. 
+ Weight::from_parts(31_777_605_000, 1240208) + .saturating_add(T::DbWeight::get().reads(62_u64)) + .saturating_add(T::DbWeight::get().writes(53_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:16 w:16) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(50064), added: 52539, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(431907), added: 434382, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: 
Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:15 w:15) + /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(37538014), added: 37540489, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:15 w:15) + /// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(165), added: 2640, mode: `Measured`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// The range of component `v` is `[0, 15]`. + fn on_initialize_invalid_non_terminal(v: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `286810 + v * (15242 ±0)` + // Estimated: `339803 + v * (16209 ±1_588)` + // Minimum execution time: 1_378_480_000 picoseconds. + Weight::from_parts(3_834_433_376, 339803) + .saturating_add(T::DbWeight::get().reads(30_u64)) + .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(v.into()))) + .saturating_add(T::DbWeight::get().writes(21_u64)) + .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(v.into()))) + .saturating_add(Weight::from_parts(0, 16209).saturating_mul(v.into())) + } +} + +// For backwards compatibility and tests. 
+impl WeightInfo for () { + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:0) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:1 w:0) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(50064), added: 52539, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(431907), added: 434382, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 
w:0) + /// Proof: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:0 w:1) + /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(37538014), added: 37540489, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:0 w:1) + /// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `Measured`) + fn on_initialize_valid_non_terminal() -> Weight { + // Proof Size summary in bytes: + // Measured: `283185` + // Estimated: `286650` + // Minimum execution time: 6_624_600_000 picoseconds. + Weight::from_parts(8_167_211_000, 286650) + .saturating_add(RocksDbWeight::get().reads(13_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:16 w:16) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(50064), added: 52539, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + 
/// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(431907), added: 434382, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:1) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:17 w:16) + /// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:1 w:1) + /// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(165), added: 2640, mode: `Measured`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:0 w:1) + /// 
Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(37538014), added: 37540489, mode: `Measured`) + fn on_initialize_valid_terminal() -> Weight { + // Proof Size summary in bytes: + // Measured: `1196794` + // Estimated: `1239859` + // Minimum execution time: 31_557_875_000 picoseconds. + Weight::from_parts(31_820_575_000, 1239859) + .saturating_add(RocksDbWeight::get().reads(48_u64)) + .saturating_add(RocksDbWeight::get().writes(40_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:16 w:16) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(50064), added: 52539, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(431907), added: 434382, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Proof: UNKNOWN KEY 
`0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:17 w:16) + /// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(165), added: 2640, mode: `Measured`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:15 w:16) + /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(37538014), added: 37540489, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + fn on_initialize_invalid_terminal() -> Weight { + // Proof Size summary in bytes: + // Measured: `1197143` + // Estimated: `1240208` + // Minimum execution time: 31_544_642_000 picoseconds. 
+ Weight::from_parts(31_777_605_000, 1240208) + .saturating_add(RocksDbWeight::get().reads(62_u64)) + .saturating_add(RocksDbWeight::get().writes(53_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:16 w:16) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(50064), added: 52539, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(431907), added: 434382, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` 
(`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:15 w:15) + /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(37538014), added: 37540489, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:15 w:15) + /// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(165), added: 2640, mode: `Measured`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// The range of component `v` is `[0, 15]`. + fn on_initialize_invalid_non_terminal(v: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `286810 + v * (15242 ±0)` + // Estimated: `339803 + v * (16209 ±1_588)` + // Minimum execution time: 1_378_480_000 picoseconds. 
+ Weight::from_parts(3_834_433_376, 339803) + .saturating_add(RocksDbWeight::get().reads(30_u64)) + .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(v.into()))) + .saturating_add(RocksDbWeight::get().writes(21_u64)) + .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(v.into()))) + .saturating_add(Weight::from_parts(0, 16209).saturating_mul(v.into())) + } +} diff --git a/substrate/frame/election-provider-multi-block/src/weights/measured/mod.rs b/substrate/frame/election-provider-multi-block/src/weights/measured/mod.rs new file mode 100644 index 0000000000000..3050fc7e7f195 --- /dev/null +++ b/substrate/frame/election-provider-multi-block/src/weights/measured/mod.rs @@ -0,0 +1,21 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +pub mod pallet_election_provider_multi_block; +pub mod pallet_election_provider_multi_block_signed; +pub mod pallet_election_provider_multi_block_unsigned; +pub mod pallet_election_provider_multi_block_verifier; diff --git a/substrate/frame/election-provider-multi-block/src/weights/measured/pallet_election_provider_multi_block.rs b/substrate/frame/election-provider-multi-block/src/weights/measured/pallet_election_provider_multi_block.rs new file mode 100644 index 0000000000000..485e842a8bbba --- /dev/null +++ b/substrate/frame/election-provider-multi-block/src/weights/measured/pallet_election_provider_multi_block.rs @@ -0,0 +1,479 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + + +//! Autogenerated weights for `pallet_election_provider_multi_block` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-19, STEPS: `2`, REPEAT: `3`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `toaster1`, CPU: `AMD Ryzen Threadripper 7980X 64-Cores` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` + +// Executed Command: +// target/release/substrate-node +// benchmark +// pallet +// --chain +// dev +// --pallet +// pallet_election_provider_multi_block +// --extrinsic +// all +// --steps +// 2 +// --repeat +// 3 +// --template +// substrate/.maintain/frame-weight-template.hbs +// --heap-pages +// 65000 +// --default-pov-mode +// measured +// --output +// ../measured + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] +#![allow(dead_code)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `pallet_election_provider_multi_block`. +pub trait WeightInfo { + fn on_initialize_nothing() -> Weight; + fn on_initialize_into_snapshot_msp() -> Weight; + fn on_initialize_into_snapshot_rest() -> Weight; + fn on_initialize_into_signed() -> Weight; + fn on_initialize_into_signed_validation() -> Weight; + fn on_initialize_into_unsigned() -> Weight; + fn export_non_terminal() -> Weight; + fn export_terminal() -> Weight; + fn manage() -> Weight; +} + +/// Weights for `pallet_election_provider_multi_block` using the Substrate node and recommended hardware. +pub struct SubstrateWeight<T>(PhantomData<T>); +impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> { + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + fn on_initialize_nothing() -> Weight { + // Proof Size summary in bytes: + // Measured: `156` + // Estimated: `1641` + // Minimum execution time: 10_046_000 picoseconds. 
+ Weight::from_parts(10_295_000, 1641) + .saturating_add(T::DbWeight::get().reads(2_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `Staking::ValidatorCount` (r:1 w:0) + /// Proof: `Staking::ValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `Staking::CounterForValidators` (r:1 w:0) + /// Proof: `Staking::CounterForValidators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `Staking::Validators` (r:1001 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::DesiredTargets` (r:0 w:1) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlock::PagedTargetSnapshotHash` (r:0 w:1) + /// Proof: `MultiBlock::PagedTargetSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`) + /// Storage: `MultiBlock::PagedTargetSnapshot` (r:0 w:1) + /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) + fn on_initialize_into_snapshot_msp() -> Weight { + // Proof Size summary in bytes: + // Measured: `48466` + // Estimated: `2526931` + // Minimum execution time: 5_056_542_000 picoseconds. 
+ Weight::from_parts(5_066_217_000, 2526931) + .saturating_add(T::DbWeight::get().reads(1005_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `Staking::VoterSnapshotStatus` (r:1 w:1) + /// Proof: `Staking::VoterSnapshotStatus` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `Measured`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:0) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `VoterList::ListBags` (r:125 w:0) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `Measured`) + /// Storage: `VoterList::ListNodes` (r:353 w:0) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `Measured`) + /// Storage: `Staking::Bonded` (r:351 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `Measured`) + /// Storage: `Staking::Ledger` (r:351 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `Measured`) + /// Storage: `Staking::Nominators` (r:351 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `Measured`) + /// Storage: `Staking::Validators` (r:345 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshot` (r:0 w:1) + /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(194117), added: 196592, mode: `Measured`) + /// Storage: 
`MultiBlock::PagedVoterSnapshotHash` (r:0 w:1) + /// Proof: `MultiBlock::PagedVoterSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`) + /// Storage: `Staking::MinimumActiveStake` (r:0 w:1) + /// Proof: `Staking::MinimumActiveStake` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `Measured`) + /// Storage: `VoterList::Lock` (r:0 w:1) + /// Proof: `VoterList::Lock` (`max_values`: Some(1), `max_size`: Some(0), added: 495, mode: `Measured`) + fn on_initialize_into_snapshot_rest() -> Weight { + // Proof Size summary in bytes: + // Measured: `792084` + // Estimated: `1666749` + // Minimum execution time: 15_960_326_000 picoseconds. + Weight::from_parts(16_749_783_000, 1666749) + .saturating_add(T::DbWeight::get().reads(1880_u64)) + .saturating_add(T::DbWeight::get().writes(6_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + fn on_initialize_into_signed() -> Weight { + // Proof Size summary in bytes: + // Measured: `340` + // Estimated: `1825` + // Minimum execution time: 68_564_000 picoseconds. 
+ Weight::from_parts(187_695_000, 1825) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:0) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + fn on_initialize_into_signed_validation() -> Weight { + // Proof Size summary in bytes: + // Measured: `340` + // Estimated: `3805` + // Minimum execution time: 119_762_000 picoseconds. + Weight::from_parts(132_912_000, 3805) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + fn on_initialize_into_unsigned() -> Weight { + // Proof Size summary in bytes: + // Measured: `340` + // Estimated: `1825` + // Minimum execution time: 101_013_000 picoseconds. 
+ Weight::from_parts(178_330_000, 1825) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `Measured`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `Staking::ElectableStashes` (r:1 w:1) + /// Proof: `Staking::ElectableStashes` (`max_values`: Some(1), `max_size`: Some(32002), added: 32497, mode: `Measured`) + /// Storage: `Staking::ErasStakersOverview` (r:127 w:127) + /// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `Measured`) + /// Storage: `Staking::ErasTotalStake` (r:1 w:1) + /// Proof: `Staking::ErasTotalStake` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `Measured`) + /// Storage: `Staking::Validators` (r:127 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `Measured`) + /// Storage: `Staking::ErasValidatorPrefs` (r:0 w:127) + /// Proof: `Staking::ErasValidatorPrefs` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `Measured`) + /// Storage: `Staking::ErasStakersPaged` (r:0 w:19) + /// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn export_non_terminal() -> Weight { + // Proof Size summary in bytes: + // Measured: `36590` + // Estimated: `351905` + // Minimum 
execution time: 1_616_533_000 picoseconds. + Weight::from_parts(1_632_918_000, 351905) + .saturating_add(T::DbWeight::get().reads(260_u64)) + .saturating_add(T::DbWeight::get().writes(276_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:1) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:64 w:64) + /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:1) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshot` (r:64 w:64) + /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(194117), added: 196592, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshotHash` (r:64 w:64) + /// Proof: `MultiBlock::PagedVoterSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`) + /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:1) + /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) + /// Storage: `MultiBlock::PagedTargetSnapshotHash` (r:1 w:1) + /// Proof: `MultiBlock::PagedTargetSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `Staking::ElectableStashes` (r:1 w:1) + /// Proof: `Staking::ElectableStashes` (`max_values`: Some(1), `max_size`: Some(32002), added: 32497, mode: `Measured`) + /// 
Storage: `Staking::ErasStakersOverview` (r:53 w:53) + /// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `Measured`) + /// Storage: `Staking::ErasStakersPaged` (r:53 w:53) + /// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ErasTotalStake` (r:1 w:1) + /// Proof: `Staking::ErasTotalStake` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `Measured`) + /// Storage: `Staking::Validators` (r:53 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `Measured`) + /// Storage: `MultiBlock::DesiredTargets` (r:0 w:1) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `Staking::ErasValidatorPrefs` (r:0 w:53) + /// Proof: `Staking::ErasValidatorPrefs` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:0 w:1) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:0 w:1) + /// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`) + fn export_terminal() -> Weight { + // Proof Size summary in bytes: + // Measured: `129036` + // Estimated: `288426` + // Minimum execution time: 2_158_357_000 picoseconds. + Weight::from_parts(2_167_039_000, 288426) + .saturating_add(T::DbWeight::get().reads(359_u64)) + .saturating_add(T::DbWeight::get().writes(361_u64)) + } + fn manage() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 120_000 picoseconds. + Weight::from_parts(190_000, 0) + } +} + +// For backwards compatibility and tests. 
+impl WeightInfo for () { + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + fn on_initialize_nothing() -> Weight { + // Proof Size summary in bytes: + // Measured: `156` + // Estimated: `1641` + // Minimum execution time: 10_046_000 picoseconds. + Weight::from_parts(10_295_000, 1641) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `Staking::ValidatorCount` (r:1 w:0) + /// Proof: `Staking::ValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `Staking::CounterForValidators` (r:1 w:0) + /// Proof: `Staking::CounterForValidators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `Staking::Validators` (r:1001 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::DesiredTargets` (r:0 w:1) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlock::PagedTargetSnapshotHash` (r:0 w:1) + /// Proof: `MultiBlock::PagedTargetSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`) + /// Storage: `MultiBlock::PagedTargetSnapshot` (r:0 w:1) + /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: 
Some(32014), added: 34489, mode: `Measured`) + fn on_initialize_into_snapshot_msp() -> Weight { + // Proof Size summary in bytes: + // Measured: `48466` + // Estimated: `2526931` + // Minimum execution time: 5_056_542_000 picoseconds. + Weight::from_parts(5_066_217_000, 2526931) + .saturating_add(RocksDbWeight::get().reads(1005_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `Staking::VoterSnapshotStatus` (r:1 w:1) + /// Proof: `Staking::VoterSnapshotStatus` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `Measured`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:0) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `VoterList::ListBags` (r:125 w:0) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `Measured`) + /// Storage: `VoterList::ListNodes` (r:353 w:0) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `Measured`) + /// Storage: `Staking::Bonded` (r:351 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `Measured`) + /// Storage: `Staking::Ledger` (r:351 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `Measured`) + /// Storage: `Staking::Nominators` (r:351 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `Measured`) + /// Storage: `Staking::Validators` (r:345 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: 
Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshot` (r:0 w:1) + /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(194117), added: 196592, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshotHash` (r:0 w:1) + /// Proof: `MultiBlock::PagedVoterSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`) + /// Storage: `Staking::MinimumActiveStake` (r:0 w:1) + /// Proof: `Staking::MinimumActiveStake` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `Measured`) + /// Storage: `VoterList::Lock` (r:0 w:1) + /// Proof: `VoterList::Lock` (`max_values`: Some(1), `max_size`: Some(0), added: 495, mode: `Measured`) + fn on_initialize_into_snapshot_rest() -> Weight { + // Proof Size summary in bytes: + // Measured: `792084` + // Estimated: `1666749` + // Minimum execution time: 15_960_326_000 picoseconds. + Weight::from_parts(16_749_783_000, 1666749) + .saturating_add(RocksDbWeight::get().reads(1880_u64)) + .saturating_add(RocksDbWeight::get().writes(6_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + fn on_initialize_into_signed() -> Weight { + // Proof Size summary in bytes: + // Measured: `340` + // Estimated: `1825` + // Minimum execution time: 68_564_000 picoseconds. 
+ Weight::from_parts(187_695_000, 1825) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:0) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + fn on_initialize_into_signed_validation() -> Weight { + // Proof Size summary in bytes: + // Measured: `340` + // Estimated: `3805` + // Minimum execution time: 119_762_000 picoseconds. + Weight::from_parts(132_912_000, 3805) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + fn on_initialize_into_unsigned() -> Weight { + // Proof Size summary in bytes: + // Measured: `340` + // Estimated: `1825` + // Minimum execution time: 101_013_000 picoseconds. 
+ Weight::from_parts(178_330_000, 1825) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `Measured`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `Staking::ElectableStashes` (r:1 w:1) + /// Proof: `Staking::ElectableStashes` (`max_values`: Some(1), `max_size`: Some(32002), added: 32497, mode: `Measured`) + /// Storage: `Staking::ErasStakersOverview` (r:127 w:127) + /// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `Measured`) + /// Storage: `Staking::ErasTotalStake` (r:1 w:1) + /// Proof: `Staking::ErasTotalStake` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `Measured`) + /// Storage: `Staking::Validators` (r:127 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `Measured`) + /// Storage: `Staking::ErasValidatorPrefs` (r:0 w:127) + /// Proof: `Staking::ErasValidatorPrefs` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `Measured`) + /// Storage: `Staking::ErasStakersPaged` (r:0 w:19) + /// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn export_non_terminal() -> Weight { + // Proof Size summary in bytes: + // Measured: `36590` + // Estimated: `351905` + // 
Minimum execution time: 1_616_533_000 picoseconds. + Weight::from_parts(1_632_918_000, 351905) + .saturating_add(RocksDbWeight::get().reads(260_u64)) + .saturating_add(RocksDbWeight::get().writes(276_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:1) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:64 w:64) + /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:1) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshot` (r:64 w:64) + /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(194117), added: 196592, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshotHash` (r:64 w:64) + /// Proof: `MultiBlock::PagedVoterSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`) + /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:1) + /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) + /// Storage: `MultiBlock::PagedTargetSnapshotHash` (r:1 w:1) + /// Proof: `MultiBlock::PagedTargetSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `Staking::ElectableStashes` (r:1 w:1) + /// Proof: `Staking::ElectableStashes` (`max_values`: Some(1), `max_size`: Some(32002), added: 32497, mode: 
`Measured`) + /// Storage: `Staking::ErasStakersOverview` (r:53 w:53) + /// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `Measured`) + /// Storage: `Staking::ErasStakersPaged` (r:53 w:53) + /// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ErasTotalStake` (r:1 w:1) + /// Proof: `Staking::ErasTotalStake` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `Measured`) + /// Storage: `Staking::Validators` (r:53 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `Measured`) + /// Storage: `MultiBlock::DesiredTargets` (r:0 w:1) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `Staking::ErasValidatorPrefs` (r:0 w:53) + /// Proof: `Staking::ErasValidatorPrefs` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:0 w:1) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:0 w:1) + /// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`) + fn export_terminal() -> Weight { + // Proof Size summary in bytes: + // Measured: `129036` + // Estimated: `288426` + // Minimum execution time: 2_158_357_000 picoseconds. + Weight::from_parts(2_167_039_000, 288426) + .saturating_add(RocksDbWeight::get().reads(359_u64)) + .saturating_add(RocksDbWeight::get().writes(361_u64)) + } + fn manage() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 120_000 picoseconds. 
+ Weight::from_parts(190_000, 0) + } +} diff --git a/substrate/frame/election-provider-multi-block/src/weights/measured/pallet_election_provider_multi_block_signed.rs b/substrate/frame/election-provider-multi-block/src/weights/measured/pallet_election_provider_multi_block_signed.rs new file mode 100644 index 0000000000000..6641efd12955e --- /dev/null +++ b/substrate/frame/election-provider-multi-block/src/weights/measured/pallet_election_provider_multi_block_signed.rs @@ -0,0 +1,280 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + + +//! Autogenerated weights for `pallet_election_provider_multi_block::signed` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-18, STEPS: `2`, REPEAT: `3`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `toaster1`, CPU: `AMD Ryzen Threadripper 7980X 64-Cores` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` + +// Executed Command: +// target/release/substrate-node +// benchmark +// pallet +// --chain +// dev +// --pallet +// pallet_election_provider_multi_block::signed +// --extrinsic +// all +// --steps +// 2 +// --repeat +// 3 +// --template +// substrate/.maintain/frame-weight-template.hbs +// --heap-pages +// 65000 +// --default-pov-mode +// measured +// --output +// ../measured + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] +#![allow(dead_code)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `pallet_election_provider_multi_block::signed`. +pub trait WeightInfo { + fn register_not_full() -> Weight; + fn register_eject() -> Weight; + fn submit_page() -> Weight; + fn unset_page() -> Weight; + fn bail() -> Weight; + fn clear_old_round_data(p: u32) -> Weight; +} + +/// Weights for `pallet_election_provider_multi_block::signed` using the Substrate node and recommended hardware. 
+pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:0 w:1) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(214), added: 2689, mode: `Measured`) + fn register_not_full() -> Weight { + // Proof Size summary in bytes: + // Measured: `3043` + // Estimated: `6508` + // Minimum execution time: 59_691_000 picoseconds. 
+ Weight::from_parts(60_532_000, 6508) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `Balances::Holds` (r:2 w:2) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:2) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(214), added: 2689, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:64 w:64) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(17279), added: 19754, mode: `Measured`) + fn register_eject() -> Weight { + // Proof Size summary in bytes: + // Measured: `8691` + // Estimated: `168081` + // Minimum execution time: 181_556_000 picoseconds. 
+ Weight::from_parts(182_247_000, 168081) + .saturating_add(T::DbWeight::get().reads(70_u64)) + .saturating_add(T::DbWeight::get().writes(69_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(214), added: 2689, mode: `Measured`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(17279), added: 19754, mode: `Measured`) + fn submit_page() -> Weight { + // Proof Size summary in bytes: + // Measured: `3492` + // Estimated: `6957` + // Minimum execution time: 945_345_000 picoseconds. 
+ Weight::from_parts(1_505_947_000, 6957) + .saturating_add(T::DbWeight::get().reads(5_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(214), added: 2689, mode: `Measured`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(17279), added: 19754, mode: `Measured`) + fn unset_page() -> Weight { + // Proof Size summary in bytes: + // Measured: `3832` + // Estimated: `7297` + // Minimum execution time: 123_338_000 picoseconds. 
+ Weight::from_parts(127_173_000, 7297) + .saturating_add(T::DbWeight::get().reads(5_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:64 w:64) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(17279), added: 19754, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(214), added: 2689, mode: `Measured`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`) + fn bail() -> Weight { + // Proof Size summary in bytes: + // Measured: `5557` + // Estimated: `164947` + // Minimum execution time: 147_695_000 picoseconds. + Weight::from_parts(151_360_000, 164947) + .saturating_add(T::DbWeight::get().reads(69_u64)) + .saturating_add(T::DbWeight::get().writes(67_u64)) + } + fn clear_old_round_data(p: u32) -> Weight { + Default::default() + } +} + +// For backwards compatibility and tests. 
+impl WeightInfo for () { + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:0 w:1) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(214), added: 2689, mode: `Measured`) + fn register_not_full() -> Weight { + // Proof Size summary in bytes: + // Measured: `3043` + // Estimated: `6508` + // Minimum execution time: 59_691_000 picoseconds. 
+ Weight::from_parts(60_532_000, 6508) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `Balances::Holds` (r:2 w:2) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:2) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(214), added: 2689, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:64 w:64) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(17279), added: 19754, mode: `Measured`) + fn register_eject() -> Weight { + // Proof Size summary in bytes: + // Measured: `8691` + // Estimated: `168081` + // Minimum execution time: 181_556_000 picoseconds. 
+ Weight::from_parts(182_247_000, 168081) + .saturating_add(RocksDbWeight::get().reads(70_u64)) + .saturating_add(RocksDbWeight::get().writes(69_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(214), added: 2689, mode: `Measured`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(17279), added: 19754, mode: `Measured`) + fn submit_page() -> Weight { + // Proof Size summary in bytes: + // Measured: `3492` + // Estimated: `6957` + // Minimum execution time: 945_345_000 picoseconds. 
+ Weight::from_parts(1_505_947_000, 6957) + .saturating_add(RocksDbWeight::get().reads(5_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(214), added: 2689, mode: `Measured`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(17279), added: 19754, mode: `Measured`) + fn unset_page() -> Weight { + // Proof Size summary in bytes: + // Measured: `3832` + // Estimated: `7297` + // Minimum execution time: 123_338_000 picoseconds. 
+ Weight::from_parts(127_173_000, 7297) + .saturating_add(RocksDbWeight::get().reads(5_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:64 w:64) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(17279), added: 19754, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(214), added: 2689, mode: `Measured`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`) + fn bail() -> Weight { + // Proof Size summary in bytes: + // Measured: `5557` + // Estimated: `164947` + // Minimum execution time: 147_695_000 picoseconds. 
+ Weight::from_parts(151_360_000, 164947) + .saturating_add(RocksDbWeight::get().reads(69_u64)) + .saturating_add(RocksDbWeight::get().writes(67_u64)) + } + fn clear_old_round_data(_p: u32) -> Weight { + Default::default() + } +} diff --git a/substrate/frame/election-provider-multi-block/src/weights/measured/pallet_election_provider_multi_block_unsigned.rs b/substrate/frame/election-provider-multi-block/src/weights/measured/pallet_election_provider_multi_block_unsigned.rs new file mode 100644 index 0000000000000..ddcd34ad3d7aa --- /dev/null +++ b/substrate/frame/election-provider-multi-block/src/weights/measured/pallet_election_provider_multi_block_unsigned.rs @@ -0,0 +1,154 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + + +//! Autogenerated weights for `pallet_election_provider_multi_block::unsigned` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-18, STEPS: `2`, REPEAT: `3`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `toaster1`, CPU: `AMD Ryzen Threadripper 7980X 64-Cores` +//!
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` + +// Executed Command: +// target/release/substrate-node +// benchmark +// pallet +// --chain +// dev +// --pallet +// pallet_election_provider_multi_block::unsigned +// --extrinsic +// all +// --steps +// 2 +// --repeat +// 3 +// --template +// substrate/.maintain/frame-weight-template.hbs +// --heap-pages +// 65000 +// --default-pov-mode +// measured +// --output +// ../measured + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] +#![allow(dead_code)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `pallet_election_provider_multi_block::unsigned`. +pub trait WeightInfo { + fn validate_unsigned() -> Weight; + fn submit_unsigned() -> Weight; +} + +/// Weights for `pallet_election_provider_multi_block::unsigned` using the Substrate node and recommended hardware. 
+pub struct SubstrateWeight<T>(PhantomData<T>); +impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> { + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`) + /// Storage: `MultiBlockVerifier::MinimumScore` (r:1 w:0) + /// Proof: `MultiBlockVerifier::MinimumScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`) + /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + fn validate_unsigned() -> Weight { + // Proof Size summary in bytes: + // Measured: `364` + // Estimated: `1849` + // Minimum execution time: 51_839_000 picoseconds.
+ Weight::from_parts(52_049_000, 1849) + .saturating_add(T::DbWeight::get().reads(5_u64)) + } + /// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:1 w:1) + /// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`) + /// Storage: `MultiBlockVerifier::MinimumScore` (r:1 w:0) + /// Proof: `MultiBlockVerifier::MinimumScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`) + /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(194117), added: 196592, mode: `Measured`) + /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionY` (r:0 w:1) + /// Proof: `MultiBlockVerifier::QueuedSolutionY` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `Measured`) + fn submit_unsigned() -> Weight { + // Proof Size summary in bytes: + // Measured: `61621` + // Estimated: `65086` + // Minimum execution time: 1_936_469_000 picoseconds. + Weight::from_parts(2_505_223_000, 65086) + .saturating_add(T::DbWeight::get().reads(6_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } +} + +// For backwards compatibility and tests. 
+impl WeightInfo for () { + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`) + /// Storage: `MultiBlockVerifier::MinimumScore` (r:1 w:0) + /// Proof: `MultiBlockVerifier::MinimumScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`) + /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + fn validate_unsigned() -> Weight { + // Proof Size summary in bytes: + // Measured: `364` + // Estimated: `1849` + // Minimum execution time: 51_839_000 picoseconds. 
+ Weight::from_parts(52_049_000, 1849) + .saturating_add(RocksDbWeight::get().reads(5_u64)) + } + /// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:1 w:1) + /// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`) + /// Storage: `MultiBlockVerifier::MinimumScore` (r:1 w:0) + /// Proof: `MultiBlockVerifier::MinimumScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`) + /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(194117), added: 196592, mode: `Measured`) + /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionY` (r:0 w:1) + /// Proof: `MultiBlockVerifier::QueuedSolutionY` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `Measured`) + fn submit_unsigned() -> Weight { + // Proof Size summary in bytes: + // Measured: `61621` + // Estimated: `65086` + // Minimum execution time: 1_936_469_000 picoseconds. 
+ Weight::from_parts(2_505_223_000, 65086) + .saturating_add(RocksDbWeight::get().reads(6_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } +} diff --git a/substrate/frame/election-provider-multi-block/src/weights/measured/pallet_election_provider_multi_block_verifier.rs b/substrate/frame/election-provider-multi-block/src/weights/measured/pallet_election_provider_multi_block_verifier.rs new file mode 100644 index 0000000000000..cbc6aa21da4ca --- /dev/null +++ b/substrate/frame/election-provider-multi-block/src/weights/measured/pallet_election_provider_multi_block_verifier.rs @@ -0,0 +1,362 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + + +//! Autogenerated weights for `pallet_election_provider_multi_block::verifier` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-18, STEPS: `2`, REPEAT: `3`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `toaster1`, CPU: `AMD Ryzen Threadripper 7980X 64-Cores` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` + +// Executed Command: +// target/release/substrate-node +// benchmark +// pallet +// --chain +// dev +// --pallet +// pallet_election_provider_multi_block::verifier +// --extrinsic +// all +// --steps +// 2 +// --repeat +// 3 +// --template +// substrate/.maintain/frame-weight-template.hbs +// --heap-pages +// 65000 +// --default-pov-mode +// measured +// --output +// ../measured + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] +#![allow(dead_code)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `pallet_election_provider_multi_block::verifier`. +pub trait WeightInfo { + fn on_initialize_valid_non_terminal() -> Weight; + fn on_initialize_valid_terminal() -> Weight; + fn on_initialize_invalid_terminal() -> Weight; + fn on_initialize_invalid_non_terminal(v: u32, ) -> Weight; +} + +/// Weights for `pallet_election_provider_multi_block::verifier` using the Substrate node and recommended hardware. 
+pub struct SubstrateWeight<T>(PhantomData<T>); +impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> { + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:0) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:1 w:0) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(17279), added: 19754, mode: `Measured`) + /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(194117), added: 196592, mode: `Measured`) + /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:0 w:1) + /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:0 w:1) + /// Proof:
`MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `Measured`) + fn on_initialize_valid_non_terminal() -> Weight { + // Proof Size summary in bytes: + // Measured: `63117` + // Estimated: `66582` + // Minimum execution time: 445_166_000 picoseconds. + Weight::from_parts(445_557_000, 66582) + .saturating_add(T::DbWeight::get().reads(9_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:64 w:64) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(17279), added: 19754, mode: `Measured`) + /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(194117), added: 196592, mode: `Measured`) + /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:1) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` 
(`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:65 w:64) + /// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:1 w:1) + /// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(214), added: 2689, mode: `Measured`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:0 w:1) + /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `Measured`) + fn on_initialize_valid_terminal() -> Weight { + // Proof Size summary in bytes: + // Measured: `1030680` + // Estimated: `1192545` + // Minimum execution time: 2_328_715_000 picoseconds. 
+ Weight::from_parts(2_347_354_000, 1192545) + .saturating_add(T::DbWeight::get().reads(140_u64)) + .saturating_add(T::DbWeight::get().writes(135_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:64 w:64) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(17279), added: 19754, mode: `Measured`) + /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(194117), added: 196592, mode: `Measured`) + /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:65 w:64) + /// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `Measured`) + /// Storage: 
`MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(214), added: 2689, mode: `Measured`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:63 w:64) + /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `Measured`) + fn on_initialize_invalid_terminal() -> Weight { + // Proof Size summary in bytes: + // Measured: `1033154` + // Estimated: `1195019` + // Minimum execution time: 2_411_702_000 picoseconds. + Weight::from_parts(2_418_792_000, 1195019) + .saturating_add(T::DbWeight::get().reads(202_u64)) + .saturating_add(T::DbWeight::get().writes(196_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:64 w:64) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(17279), added: 19754, mode: `Measured`) + /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 
w:0) + /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(194117), added: 196592, mode: `Measured`) + /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:63 w:63) + /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:63 w:63) + /// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(214), added: 2689, mode: `Measured`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`) + /// The range of component `v` is `[0, 63]`. + fn on_initialize_invalid_non_terminal(v: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `68609 + v * (2711 ±0)` + // Estimated: `227999 + v * (6290 ±0)` + // Minimum execution time: 457_094_000 picoseconds. 
+ Weight::from_parts(462_712_666, 227999) + // Standard Error: 59_963 + .saturating_add(Weight::from_parts(3_863_693, 0).saturating_mul(v.into())) + .saturating_add(T::DbWeight::get().reads(74_u64)) + .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(v.into()))) + .saturating_add(T::DbWeight::get().writes(68_u64)) + .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(v.into()))) + .saturating_add(Weight::from_parts(0, 6290).saturating_mul(v.into())) + } +} + +// For backwards compatibility and tests. +impl WeightInfo for () { + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:0) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:1 w:0) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(17279), added: 19754, mode: `Measured`) + /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(194117), added: 196592, mode: `Measured`) + /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: 
`MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:0 w:1) + /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:0 w:1) + /// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `Measured`) + fn on_initialize_valid_non_terminal() -> Weight { + // Proof Size summary in bytes: + // Measured: `63117` + // Estimated: `66582` + // Minimum execution time: 445_166_000 picoseconds. + Weight::from_parts(445_557_000, 66582) + .saturating_add(RocksDbWeight::get().reads(9_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:64 w:64) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(17279), added: 19754, mode: `Measured`) + /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) + /// Storage: 
`MultiBlock::PagedVoterSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(194117), added: 196592, mode: `Measured`) + /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:1) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:65 w:64) + /// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:1 w:1) + /// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(214), added: 2689, mode: `Measured`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:0 w:1) + /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `Measured`) + fn on_initialize_valid_terminal() -> Weight { + // Proof Size summary in bytes: + // Measured: `1030680` + // Estimated: `1192545` + // Minimum execution time: 2_328_715_000 picoseconds. 
+ Weight::from_parts(2_347_354_000, 1192545) + .saturating_add(RocksDbWeight::get().reads(140_u64)) + .saturating_add(RocksDbWeight::get().writes(135_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:64 w:64) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(17279), added: 19754, mode: `Measured`) + /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(194117), added: 196592, mode: `Measured`) + /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:65 w:64) + /// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `Measured`) + /// Storage: 
`MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(214), added: 2689, mode: `Measured`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:63 w:64) + /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `Measured`) + fn on_initialize_invalid_terminal() -> Weight { + // Proof Size summary in bytes: + // Measured: `1033154` + // Estimated: `1195019` + // Minimum execution time: 2_411_702_000 picoseconds. + Weight::from_parts(2_418_792_000, 1195019) + .saturating_add(RocksDbWeight::get().reads(202_u64)) + .saturating_add(RocksDbWeight::get().writes(196_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:64 w:64) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(17279), added: 19754, mode: `Measured`) + /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshot` 
(r:1 w:0) + /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(194117), added: 196592, mode: `Measured`) + /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:63 w:63) + /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:63 w:63) + /// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(214), added: 2689, mode: `Measured`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`) + /// The range of component `v` is `[0, 63]`. + fn on_initialize_invalid_non_terminal(v: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `68609 + v * (2711 ±0)` + // Estimated: `227999 + v * (6290 ±0)` + // Minimum execution time: 457_094_000 picoseconds. 
+ Weight::from_parts(462_712_666, 227999) + // Standard Error: 59_963 + .saturating_add(Weight::from_parts(3_863_693, 0).saturating_mul(v.into())) + .saturating_add(RocksDbWeight::get().reads(74_u64)) + .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(v.into()))) + .saturating_add(RocksDbWeight::get().writes(68_u64)) + .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(v.into()))) + .saturating_add(Weight::from_parts(0, 6290).saturating_mul(v.into())) + } +} diff --git a/substrate/frame/election-provider-multi-block/src/weights/mod.rs b/substrate/frame/election-provider-multi-block/src/weights/mod.rs new file mode 100644 index 0000000000000..0b6d2ed48b8da --- /dev/null +++ b/substrate/frame/election-provider-multi-block/src/weights/mod.rs @@ -0,0 +1,23 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Weights for the election provider multi-block pallet. 
+ +#![allow(unused)] +pub mod measured; +pub mod zero; +pub use zero::AllZeroWeights; diff --git a/substrate/frame/election-provider-multi-block/src/weights/polkadot/measured/pallet_election_provider_multi_block.rs b/substrate/frame/election-provider-multi-block/src/weights/polkadot/measured/pallet_election_provider_multi_block.rs new file mode 100644 index 0000000000000..4492372c466be --- /dev/null +++ b/substrate/frame/election-provider-multi-block/src/weights/polkadot/measured/pallet_election_provider_multi_block.rs @@ -0,0 +1,630 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + + +//! Autogenerated weights for `pallet_election_provider_multi_block` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-04-17, STEPS: `5`, REPEAT: `10`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `ggwpez-ref-hw`, CPU: `AMD EPYC 7232P 8-Core Processor` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: `1024` + +// Executed Command: +// ../../../../../target/release/frame-omni-bencher +// v1 +// benchmark +// pallet +// --pallet +// pallet_election_provider_multi_block +// --extrinsic +// all +// --runtime +// ../../../../../target/release/wbuild/pallet-staking-async-parachain-runtime/pallet_staking_async_parachain_runtime.compact.wasm +// --steps +// 5 +// --repeat +// 10 +// --genesis-builder-preset +// dot_size +// --template +// ../../../../../substrate/.maintain/frame-weight-template.hbs +// --heap-pages +// 65000 +// --output +// ./pallet_election_provider_multi_block_dot_size.rs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] +#![allow(dead_code)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `pallet_election_provider_multi_block`. +pub trait WeightInfo { + fn on_initialize_nothing() -> Weight; + fn on_initialize_into_snapshot_msp() -> Weight; + fn on_initialize_into_snapshot_rest() -> Weight; + fn on_initialize_into_signed() -> Weight; + fn on_initialize_into_signed_validation() -> Weight; + fn on_initialize_into_unsigned() -> Weight; + fn export_non_terminal() -> Weight; + fn export_terminal() -> Weight; + fn manage() -> Weight; +} + +/// Weights for `pallet_election_provider_multi_block` using the Substrate node and recommended hardware. 
+pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + fn on_initialize_nothing() -> Weight { + // Proof Size summary in bytes: + // Measured: `147` + // Estimated: `3612` + // Minimum execution time: 25_110_000 picoseconds. 
+ Weight::from_parts(25_870_000, 3612) + .saturating_add(T::DbWeight::get().reads(5_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `Staking::ValidatorCount` (r:1 w:0) + /// Proof: `Staking::ValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `Staking::CounterForValidators` (r:1 w:0) + /// Proof: `Staking::CounterForValidators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `Staking::Validators` (r:1001 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Storage: `MultiBlock::DesiredTargets` (r:0 w:1) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlock::PagedTargetSnapshotHash` (r:0 w:1) + /// Proof: `MultiBlock::PagedTargetSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`) + /// Storage: `MultiBlock::PagedTargetSnapshot` (r:0 w:1) + /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) + fn 
on_initialize_into_snapshot_msp() -> Weight { + // Proof Size summary in bytes: + // Measured: `48347` + // Estimated: `2526812` + // Minimum execution time: 9_270_059_000 picoseconds. + Weight::from_parts(9_288_159_000, 2526812) + .saturating_add(T::DbWeight::get().reads(1008_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Storage: `Staking::VoterSnapshotStatus` (r:1 w:1) + /// Proof: `Staking::VoterSnapshotStatus` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `Measured`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:0) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `VoterList::ListBags` (r:1 w:0) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `Measured`) + /// Storage: `VoterList::ListNodes` (r:705 w:0) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `Measured`) + /// Storage: `Staking::Bonded` (r:703 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `Measured`) + /// Storage: `Staking::Ledger` (r:703 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `Measured`) + /// Storage: `Staking::Nominators` (r:703 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `Measured`) + /// Storage: `Staking::Validators` (r:216 w:0) + /// Proof: 
`Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Storage: `MultiBlock::PagedVoterSnapshot` (r:0 w:1) + /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshotHash` (r:0 w:1) + /// Proof: `MultiBlock::PagedVoterSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`) + /// Storage: `Staking::MinimumActiveStake` (r:0 w:1) + /// Proof: `Staking::MinimumActiveStake` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `Measured`) + /// Storage: `VoterList::Lock` (r:0 w:1) + /// Proof: `VoterList::Lock` (`max_values`: Some(1), `max_size`: Some(0), added: 495, mode: `Measured`) + fn on_initialize_into_snapshot_rest() -> Weight { + // Proof Size summary in bytes: + // Measured: `1465524` + // Estimated: `3211389` + // Minimum execution time: 49_545_476_000 picoseconds. 
+ Weight::from_parts(49_841_748_000, 3211389) + .saturating_add(T::DbWeight::get().reads(3039_u64)) + .saturating_add(T::DbWeight::get().writes(6_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Storage: `Staking::VoterSnapshotStatus` (r:1 w:1) + /// Proof: `Staking::VoterSnapshotStatus` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `Measured`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:0) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `VoterList::ListNodes` (r:705 w:0) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `Measured`) + /// Storage: `Staking::Bonded` (r:703 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `Measured`) + /// Storage: `Staking::Ledger` (r:703 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `Measured`) + /// Storage: `Staking::Nominators` (r:703 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `Measured`) + /// Storage: `VoterList::ListBags` (r:1 w:0) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `Measured`) + /// Storage: `Staking::Validators` (r:38 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x6358acd2035ec4bb863fa981e0c177b9` (r:1 w:0) + /// Proof: UNKNOWN 
KEY `0x6358acd2035ec4bb863fa981e0c177b9` (r:1 w:0) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Storage: `MultiBlock::PagedVoterSnapshot` (r:0 w:1) + /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshotHash` (r:0 w:1) + /// Proof: `MultiBlock::PagedVoterSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`) + /// Storage: `Staking::MinimumActiveStake` (r:0 w:1) + /// Proof: `Staking::MinimumActiveStake` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `Measured`) + /// Storage: `VoterList::Lock` (r:0 w:1) + /// Proof: `VoterList::Lock` (`max_values`: Some(1), `max_size`: Some(0), added: 495, mode: `Measured`) + fn on_initialize_into_signed() -> Weight { + // Proof Size summary in bytes: + // Measured: `1527754` + // Estimated: `3273619` + // Minimum execution time: 49_782_868_000 picoseconds. 
+ Weight::from_parts(51_099_387_000, 3273619) + .saturating_add(T::DbWeight::get().reads(2862_u64)) + .saturating_add(T::DbWeight::get().writes(6_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:0) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + fn on_initialize_into_signed_validation() -> Weight { + // Proof Size summary in bytes: + // Measured: `335` + // Estimated: `3800` + // Minimum execution time: 3_747_608_000 picoseconds. 
+ Weight::from_parts(3_844_358_000, 3800) + .saturating_add(T::DbWeight::get().reads(7_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + fn on_initialize_into_unsigned() -> Weight { + // Proof Size summary in bytes: + // Measured: `331` + // Estimated: `3796` + // Minimum execution time: 3_312_145_000 picoseconds. 
+ Weight::from_parts(3_861_008_000, 3796) + .saturating_add(T::DbWeight::get().reads(6_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(33794014), added: 33796489, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `Staking::ElectableStashes` (r:1 w:1) + /// Proof: `Staking::ElectableStashes` (`max_values`: Some(1), `max_size`: Some(32002), added: 32497, mode: `Measured`) + /// Storage: `Staking::ErasStakersOverview` (r:491 w:491) + /// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `Measured`) + /// Storage: `Staking::ErasTotalStake` (r:1 w:1) + /// Proof: `Staking::ErasTotalStake` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `Measured`) + /// Storage: `Staking::Validators` (r:491 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `Measured`) + /// Storage: `Staking::ErasValidatorPrefs` (r:0 w:491) + /// Proof: `Staking::ErasValidatorPrefs` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: 
`Measured`) + /// Storage: `Staking::ErasStakersPaged` (r:0 w:490) + /// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn export_non_terminal() -> Weight { + // Proof Size summary in bytes: + // Measured: `169908` + // Estimated: `1386123` + // Minimum execution time: 13_491_209_000 picoseconds. + Weight::from_parts(13_543_270_000, 1386123) + .saturating_add(T::DbWeight::get().reads(990_u64)) + .saturating_add(T::DbWeight::get().writes(1475_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:1) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:32 w:32) + /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(33794014), added: 33796489, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `MultiBlock::Round` (r:1 w:1) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshot` (r:32 w:32) + /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshotHash` (r:32 w:32) + /// Proof: `MultiBlock::PagedVoterSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`) + /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:1) + /// Proof: `MultiBlock::PagedTargetSnapshot` 
(`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) + /// Storage: `MultiBlock::PagedTargetSnapshotHash` (r:1 w:1) + /// Proof: `MultiBlock::PagedTargetSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `Staking::ElectableStashes` (r:1 w:1) + /// Proof: `Staking::ElectableStashes` (`max_values`: Some(1), `max_size`: Some(32002), added: 32497, mode: `Measured`) + /// Storage: `Staking::ErasStakersOverview` (r:498 w:498) + /// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `Measured`) + /// Storage: `Staking::ErasStakersPaged` (r:498 w:511) + /// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ErasTotalStake` (r:1 w:1) + /// Proof: `Staking::ErasTotalStake` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `Measured`) + /// Storage: `Staking::Validators` (r:498 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `Measured`) + /// Storage: `MultiBlock::DesiredTargets` (r:0 w:1) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `Staking::ErasValidatorPrefs` (r:0 w:498) + /// Proof: `Staking::ErasValidatorPrefs` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:0 w:1) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:0 w:1) + /// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`) + fn 
export_terminal() -> Weight { + // Proof Size summary in bytes: + // Measured: `1129915` + // Estimated: `2363455` + // Minimum execution time: 28_609_419_000 picoseconds. + Weight::from_parts(30_110_591_000, 2363455) + .saturating_add(T::DbWeight::get().reads(1600_u64)) + .saturating_add(T::DbWeight::get().writes(1613_u64)) + } + fn manage() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 100_000 picoseconds. + Weight::from_parts(120_000, 0) + } +} + +// For backwards compatibility and tests. +impl WeightInfo for () { + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + fn on_initialize_nothing() -> Weight { + // Proof Size summary in bytes: + // Measured: `147` + // Estimated: `3612` + // Minimum execution time: 25_110_000 picoseconds. 
+ Weight::from_parts(25_870_000, 3612) + .saturating_add(RocksDbWeight::get().reads(5_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `Staking::ValidatorCount` (r:1 w:0) + /// Proof: `Staking::ValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `Staking::CounterForValidators` (r:1 w:0) + /// Proof: `Staking::CounterForValidators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `Staking::Validators` (r:1001 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Storage: `MultiBlock::DesiredTargets` (r:0 w:1) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlock::PagedTargetSnapshotHash` (r:0 w:1) + /// Proof: `MultiBlock::PagedTargetSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`) + /// Storage: `MultiBlock::PagedTargetSnapshot` (r:0 w:1) + /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) + fn 
on_initialize_into_snapshot_msp() -> Weight { + // Proof Size summary in bytes: + // Measured: `48347` + // Estimated: `2526812` + // Minimum execution time: 9_270_059_000 picoseconds. + Weight::from_parts(9_288_159_000, 2526812) + .saturating_add(RocksDbWeight::get().reads(1008_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Storage: `Staking::VoterSnapshotStatus` (r:1 w:1) + /// Proof: `Staking::VoterSnapshotStatus` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `Measured`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:0) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `VoterList::ListBags` (r:1 w:0) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `Measured`) + /// Storage: `VoterList::ListNodes` (r:705 w:0) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `Measured`) + /// Storage: `Staking::Bonded` (r:703 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `Measured`) + /// Storage: `Staking::Ledger` (r:703 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `Measured`) + /// Storage: `Staking::Nominators` (r:703 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `Measured`) + /// Storage: `Staking::Validators` (r:216 w:0) + /// Proof: 
`Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Storage: `MultiBlock::PagedVoterSnapshot` (r:0 w:1) + /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshotHash` (r:0 w:1) + /// Proof: `MultiBlock::PagedVoterSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`) + /// Storage: `Staking::MinimumActiveStake` (r:0 w:1) + /// Proof: `Staking::MinimumActiveStake` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `Measured`) + /// Storage: `VoterList::Lock` (r:0 w:1) + /// Proof: `VoterList::Lock` (`max_values`: Some(1), `max_size`: Some(0), added: 495, mode: `Measured`) + fn on_initialize_into_snapshot_rest() -> Weight { + // Proof Size summary in bytes: + // Measured: `1465524` + // Estimated: `3211389` + // Minimum execution time: 49_545_476_000 picoseconds. 
+ Weight::from_parts(49_841_748_000, 3211389) + .saturating_add(RocksDbWeight::get().reads(3039_u64)) + .saturating_add(RocksDbWeight::get().writes(6_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Storage: `Staking::VoterSnapshotStatus` (r:1 w:1) + /// Proof: `Staking::VoterSnapshotStatus` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `Measured`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:0) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `VoterList::ListNodes` (r:705 w:0) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `Measured`) + /// Storage: `Staking::Bonded` (r:703 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `Measured`) + /// Storage: `Staking::Ledger` (r:703 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `Measured`) + /// Storage: `Staking::Nominators` (r:703 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `Measured`) + /// Storage: `VoterList::ListBags` (r:1 w:0) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `Measured`) + /// Storage: `Staking::Validators` (r:38 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x6358acd2035ec4bb863fa981e0c177b9` (r:1 w:0) + /// Proof: 
UNKNOWN KEY `0x6358acd2035ec4bb863fa981e0c177b9` (r:1 w:0) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Storage: `MultiBlock::PagedVoterSnapshot` (r:0 w:1) + /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshotHash` (r:0 w:1) + /// Proof: `MultiBlock::PagedVoterSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`) + /// Storage: `Staking::MinimumActiveStake` (r:0 w:1) + /// Proof: `Staking::MinimumActiveStake` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `Measured`) + /// Storage: `VoterList::Lock` (r:0 w:1) + /// Proof: `VoterList::Lock` (`max_values`: Some(1), `max_size`: Some(0), added: 495, mode: `Measured`) + fn on_initialize_into_signed() -> Weight { + // Proof Size summary in bytes: + // Measured: `1527754` + // Estimated: `3273619` + // Minimum execution time: 49_782_868_000 picoseconds. 
+ Weight::from_parts(51_099_387_000, 3273619) + .saturating_add(RocksDbWeight::get().reads(2862_u64)) + .saturating_add(RocksDbWeight::get().writes(6_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:0) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + fn on_initialize_into_signed_validation() -> Weight { + // Proof Size summary in bytes: + // Measured: `335` + // Estimated: `3800` + // Minimum execution time: 3_747_608_000 picoseconds. 
+ Weight::from_parts(3_844_358_000, 3800) + .saturating_add(RocksDbWeight::get().reads(7_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x48384a816e4f71a936cb76dc9e303f2a` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + fn on_initialize_into_unsigned() -> Weight { + // Proof Size summary in bytes: + // Measured: `331` + // Estimated: `3796` + // Minimum execution time: 3_312_145_000 picoseconds. 
+ Weight::from_parts(3_861_008_000, 3796) + .saturating_add(RocksDbWeight::get().reads(6_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(33794014), added: 33796489, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `Staking::ElectableStashes` (r:1 w:1) + /// Proof: `Staking::ElectableStashes` (`max_values`: Some(1), `max_size`: Some(32002), added: 32497, mode: `Measured`) + /// Storage: `Staking::ErasStakersOverview` (r:491 w:491) + /// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `Measured`) + /// Storage: `Staking::ErasTotalStake` (r:1 w:1) + /// Proof: `Staking::ErasTotalStake` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `Measured`) + /// Storage: `Staking::Validators` (r:491 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `Measured`) + /// Storage: `Staking::ErasValidatorPrefs` (r:0 w:491) + /// Proof: `Staking::ErasValidatorPrefs` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: 
`Measured`) + /// Storage: `Staking::ErasStakersPaged` (r:0 w:490) + /// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn export_non_terminal() -> Weight { + // Proof Size summary in bytes: + // Measured: `169908` + // Estimated: `1386123` + // Minimum execution time: 13_491_209_000 picoseconds. + Weight::from_parts(13_543_270_000, 1386123) + .saturating_add(RocksDbWeight::get().reads(990_u64)) + .saturating_add(RocksDbWeight::get().writes(1475_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:1) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:32 w:32) + /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(33794014), added: 33796489, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `MultiBlock::Round` (r:1 w:1) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshot` (r:32 w:32) + /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshotHash` (r:32 w:32) + /// Proof: `MultiBlock::PagedVoterSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`) + /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:1) + /// Proof: `MultiBlock::PagedTargetSnapshot` 
(`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) + /// Storage: `MultiBlock::PagedTargetSnapshotHash` (r:1 w:1) + /// Proof: `MultiBlock::PagedTargetSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `Staking::ElectableStashes` (r:1 w:1) + /// Proof: `Staking::ElectableStashes` (`max_values`: Some(1), `max_size`: Some(32002), added: 32497, mode: `Measured`) + /// Storage: `Staking::ErasStakersOverview` (r:498 w:498) + /// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `Measured`) + /// Storage: `Staking::ErasStakersPaged` (r:498 w:511) + /// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ErasTotalStake` (r:1 w:1) + /// Proof: `Staking::ErasTotalStake` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `Measured`) + /// Storage: `Staking::Validators` (r:498 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `Measured`) + /// Storage: `MultiBlock::DesiredTargets` (r:0 w:1) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `Staking::ErasValidatorPrefs` (r:0 w:498) + /// Proof: `Staking::ErasValidatorPrefs` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:0 w:1) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:0 w:1) + /// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`) + fn 
export_terminal() -> Weight { + // Proof Size summary in bytes: + // Measured: `1129915` + // Estimated: `2363455` + // Minimum execution time: 28_609_419_000 picoseconds. + Weight::from_parts(30_110_591_000, 2363455) + .saturating_add(RocksDbWeight::get().reads(1600_u64)) + .saturating_add(RocksDbWeight::get().writes(1613_u64)) + } + fn manage() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 100_000 picoseconds. + Weight::from_parts(120_000, 0) + } +} diff --git a/substrate/frame/election-provider-multi-block/src/weights/polkadot/measured/pallet_election_provider_multi_block_signed.rs b/substrate/frame/election-provider-multi-block/src/weights/polkadot/measured/pallet_election_provider_multi_block_signed.rs new file mode 100644 index 0000000000000..919021ce23fde --- /dev/null +++ b/substrate/frame/election-provider-multi-block/src/weights/polkadot/measured/pallet_election_provider_multi_block_signed.rs @@ -0,0 +1,357 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + + +//! Autogenerated weights for `pallet_election_provider_multi_block_signed` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-04-17, STEPS: `5`, REPEAT: `10`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! 
HOSTNAME: `ggwpez-ref-hw`, CPU: `AMD EPYC 7232P 8-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: `1024` + +// Executed Command: +// ../../../../../target/release/frame-omni-bencher +// v1 +// benchmark +// pallet +// --pallet +// pallet_election_provider_multi_block_signed +// --extrinsic +// all +// --runtime +// ../../../../../target/release/wbuild/pallet-staking-async-parachain-runtime/pallet_staking_async_parachain_runtime.compact.wasm +// --steps +// 5 +// --repeat +// 10 +// --genesis-builder-preset +// dot_size +// --template +// ../../../../../substrate/.maintain/frame-weight-template.hbs +// --heap-pages +// 65000 +// --output +// ./pallet_election_provider_multi_block_signed_dot_size.rs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] +#![allow(dead_code)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `pallet_election_provider_multi_block_signed`. +pub trait WeightInfo { + fn register_not_full() -> Weight; + fn register_eject() -> Weight; + fn submit_page() -> Weight; + fn unset_page() -> Weight; + fn bail() -> Weight; + fn clear_old_round_data(p: u32, ) -> Weight; +} + +/// Weights for `pallet_election_provider_multi_block_signed` using the Substrate node and recommended hardware. 
+pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:0 w:1) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `Measured`) + fn register_not_full() -> Weight { + // Proof Size summary in bytes: + // Measured: `3074` + // Estimated: `6539` + // Minimum execution time: 138_231_000 picoseconds. 
+ Weight::from_parts(140_311_000, 6539) + .saturating_add(T::DbWeight::get().reads(5_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `Balances::Holds` (r:2 w:2) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:2) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:32 w:32) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(45072), added: 47547, mode: `Measured`) + fn register_eject() -> Weight { + // Proof Size summary in bytes: + // Measured: `7674` + // Estimated: `87864` + // Minimum execution time: 336_232_000 picoseconds. 
+ Weight::from_parts(345_393_000, 87864) + .saturating_add(T::DbWeight::get().reads(39_u64)) + .saturating_add(T::DbWeight::get().writes(37_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:0) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(45072), added: 47547, mode: `Measured`) + fn submit_page() -> Weight { + // Proof Size summary in bytes: + // Measured: `3597` + // Estimated: `7062` + // Minimum execution time: 5_572_100_000 picoseconds. 
+ Weight::from_parts(6_485_657_000, 7062) + .saturating_add(T::DbWeight::get().reads(7_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:0) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(45072), added: 47547, mode: `Measured`) + fn unset_page() -> Weight { + // Proof Size summary in bytes: + // Measured: `15527` + // Estimated: `18992` + // Minimum execution time: 6_247_565_000 picoseconds. 
+ Weight::from_parts(7_103_552_000, 18992) + .saturating_add(T::DbWeight::get().reads(7_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:32 w:32) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(45072), added: 47547, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + fn bail() -> Weight { + // Proof Size summary in bytes: + // Measured: `4539` + // Estimated: `84729` + // Minimum execution time: 229_802_000 picoseconds. 
+ Weight::from_parts(231_182_000, 84729) + .saturating_add(T::DbWeight::get().reads(38_u64)) + .saturating_add(T::DbWeight::get().writes(35_u64)) + } + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:32 w:32) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(45072), added: 47547, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + /// The range of component `p` is `[1, 32]`. + fn clear_old_round_data(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `3529 + p * (32 ±0)` + // Estimated: `6994 + p * (2507 ±0)` + // Minimum execution time: 138_841_000 picoseconds. + Weight::from_parts(138_388_455, 6994) + // Standard Error: 23_911 + .saturating_add(Weight::from_parts(1_930_778, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(5_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) + .saturating_add(T::DbWeight::get().writes(3_u64)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) + .saturating_add(Weight::from_parts(0, 2507).saturating_mul(p.into())) + } +} + +// For backwards compatibility and tests. 
+impl WeightInfo for () { + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:0 w:1) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `Measured`) + fn register_not_full() -> Weight { + // Proof Size summary in bytes: + // Measured: `3074` + // Estimated: `6539` + // Minimum execution time: 138_231_000 picoseconds. 
+ Weight::from_parts(140_311_000, 6539) + .saturating_add(RocksDbWeight::get().reads(5_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `Balances::Holds` (r:2 w:2) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:2) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:32 w:32) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(45072), added: 47547, mode: `Measured`) + fn register_eject() -> Weight { + // Proof Size summary in bytes: + // Measured: `7674` + // Estimated: `87864` + // Minimum execution time: 336_232_000 picoseconds. 
+ Weight::from_parts(345_393_000, 87864) + .saturating_add(RocksDbWeight::get().reads(39_u64)) + .saturating_add(RocksDbWeight::get().writes(37_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:0) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(45072), added: 47547, mode: `Measured`) + fn submit_page() -> Weight { + // Proof Size summary in bytes: + // Measured: `3597` + // Estimated: `7062` + // Minimum execution time: 5_572_100_000 picoseconds. 
+ Weight::from_parts(6_485_657_000, 7062) + .saturating_add(RocksDbWeight::get().reads(7_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:0) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(45072), added: 47547, mode: `Measured`) + fn unset_page() -> Weight { + // Proof Size summary in bytes: + // Measured: `15527` + // Estimated: `18992` + // Minimum execution time: 6_247_565_000 picoseconds. 
+ Weight::from_parts(7_103_552_000, 18992) + .saturating_add(RocksDbWeight::get().reads(7_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:32 w:32) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(45072), added: 47547, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + fn bail() -> Weight { + // Proof Size summary in bytes: + // Measured: `4539` + // Estimated: `84729` + // Minimum execution time: 229_802_000 picoseconds. 
+ Weight::from_parts(231_182_000, 84729) + .saturating_add(RocksDbWeight::get().reads(38_u64)) + .saturating_add(RocksDbWeight::get().writes(35_u64)) + } + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:32 w:32) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(45072), added: 47547, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `Measured`) + /// The range of component `p` is `[1, 32]`. + fn clear_old_round_data(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `3529 + p * (32 ±0)` + // Estimated: `6994 + p * (2507 ±0)` + // Minimum execution time: 138_841_000 picoseconds. 
+ Weight::from_parts(138_388_455, 6994) + // Standard Error: 23_911 + .saturating_add(Weight::from_parts(1_930_778, 0).saturating_mul(p.into())) + .saturating_add(RocksDbWeight::get().reads(5_u64)) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(p.into()))) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(p.into()))) + .saturating_add(Weight::from_parts(0, 2507).saturating_mul(p.into())) + } +} diff --git a/substrate/frame/election-provider-multi-block/src/weights/polkadot/measured/pallet_election_provider_multi_block_unsigned.rs b/substrate/frame/election-provider-multi-block/src/weights/polkadot/measured/pallet_election_provider_multi_block_unsigned.rs new file mode 100644 index 0000000000000..3311e86cd0b6a --- /dev/null +++ b/substrate/frame/election-provider-multi-block/src/weights/polkadot/measured/pallet_election_provider_multi_block_unsigned.rs @@ -0,0 +1,171 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + + +//! Autogenerated weights for `pallet_election_provider_multi_block_unsigned` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-04-17, STEPS: `5`, REPEAT: `10`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! 
HOSTNAME: `ggwpez-ref-hw`, CPU: `AMD EPYC 7232P 8-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: `1024` + +// Executed Command: +// ../../../../../target/release/frame-omni-bencher +// v1 +// benchmark +// pallet +// --pallet +// pallet_election_provider_multi_block_unsigned +// --extrinsic +// all +// --runtime +// ../../../../../target/release/wbuild/pallet-staking-async-parachain-runtime/pallet_staking_async_parachain_runtime.compact.wasm +// --steps +// 5 +// --repeat +// 10 +// --genesis-builder-preset +// dot_size +// --template +// ../../../../../substrate/.maintain/frame-weight-template.hbs +// --heap-pages +// 65000 +// --output +// ./pallet_election_provider_multi_block_unsigned_dot_size.rs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] +#![allow(dead_code)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `pallet_election_provider_multi_block_unsigned`. +pub trait WeightInfo { + fn validate_unsigned() -> Weight; + fn submit_unsigned() -> Weight; +} + +/// Weights for `pallet_election_provider_multi_block_unsigned` using the Substrate node and recommended hardware. 
+pub struct SubstrateWeight<T>(PhantomData<T>); +impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> { + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`) + /// Storage: `MultiBlockVerifier::MinimumScore` (r:1 w:0) + /// Proof: `MultiBlockVerifier::MinimumScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + fn validate_unsigned() -> Weight { + // Proof Size summary in bytes: + // Measured: `282` + // Estimated: `3747` + // Minimum execution time: 3_411_885_000 picoseconds. 
+ Weight::from_parts(4_180_130_000, 3747) + .saturating_add(T::DbWeight::get().reads(7_u64)) + } + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:1 w:1) + /// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`) + /// Storage: `MultiBlockVerifier::MinimumScore` (r:1 w:0) + /// Proof: `MultiBlockVerifier::MinimumScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`) + /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshot` (r:2 w:0) + /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionY` (r:0 w:2) + /// Proof: `MultiBlockVerifier::QueuedSolutionY` (`max_values`: None, `max_size`: Some(33794014), added: 33796489, mode: `Measured`) + fn submit_unsigned() -> Weight { + // Proof Size summary in bytes: + // Measured: `706759` + // Estimated: `712699` + // Minimum execution time: 18_104_422_000 picoseconds. 
+ Weight::from_parts(20_650_781_000, 712699) + .saturating_add(T::DbWeight::get().reads(9_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) + } +} + +// For backwards compatibility and tests. +impl WeightInfo for () { + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`) + /// Storage: `MultiBlockVerifier::MinimumScore` (r:1 w:0) + /// Proof: `MultiBlockVerifier::MinimumScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xc209f5d8eb920681b56c64b8694ea78c` (r:1 w:0) + fn validate_unsigned() -> Weight { + // Proof Size summary in bytes: + // Measured: `282` + // Estimated: `3747` + // Minimum execution time: 3_411_885_000 picoseconds. 
+ Weight::from_parts(4_180_130_000, 3747) + .saturating_add(RocksDbWeight::get().reads(7_u64)) + } + /// Storage: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xa143099d7a337c5fd879b91b2b157c2d` (r:1 w:0) + /// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:1 w:1) + /// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`) + /// Storage: `MultiBlockVerifier::MinimumScore` (r:1 w:0) + /// Proof: `MultiBlockVerifier::MinimumScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`) + /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshot` (r:2 w:0) + /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x6f320d44e42312c78638e6c92dff65af` (r:1 w:0) + /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionY` (r:0 w:2) + /// Proof: `MultiBlockVerifier::QueuedSolutionY` (`max_values`: None, `max_size`: Some(33794014), added: 33796489, mode: `Measured`) + fn submit_unsigned() -> Weight { + // Proof Size summary in bytes: + // Measured: `706759` + // Estimated: `712699` + // Minimum execution time: 18_104_422_000 picoseconds. 
+ Weight::from_parts(20_650_781_000, 712699) + .saturating_add(RocksDbWeight::get().reads(9_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + } +} diff --git a/substrate/frame/election-provider-multi-block/src/weights/polkadot/measured/pallet_election_provider_multi_block_verifier.rs b/substrate/frame/election-provider-multi-block/src/weights/polkadot/measured/pallet_election_provider_multi_block_verifier.rs new file mode 100644 index 0000000000000..93546a54abea4 --- /dev/null +++ b/substrate/frame/election-provider-multi-block/src/weights/polkadot/measured/pallet_election_provider_multi_block_verifier.rs @@ -0,0 +1,362 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + + +//! Autogenerated weights for `pallet_election_provider_multi_block::verifier` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-26, STEPS: `5`, REPEAT: `5`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `ggwpez-ref-hw`, CPU: `AMD EPYC 7232P 8-Core Processor` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` + +// Executed Command: +// target/release/substrate-node +// benchmark +// pallet +// --chain +// dev +// --pallet +// pallet_election_provider_multi_block::verifier +// --extrinsic +// all +// --steps +// 5 +// --repeat +// 5 +// --template +// substrate/.maintain/frame-weight-template.hbs +// --heap-pages +// 65000 +// --default-pov-mode +// measured +// --output +// ../measured + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] +#![allow(dead_code)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `pallet_election_provider_multi_block::verifier`. +pub trait WeightInfo { + fn on_initialize_valid_non_terminal() -> Weight; + fn on_initialize_valid_terminal() -> Weight; + fn on_initialize_invalid_terminal() -> Weight; + fn on_initialize_invalid_non_terminal(v: u32, ) -> Weight; +} + +/// Weights for `pallet_election_provider_multi_block::verifier` using the Substrate node and recommended hardware. 
+pub struct SubstrateWeight<T>(PhantomData<T>); +impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> { + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:0) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:1 w:0) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(17279), added: 19754, mode: `Measured`) + /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(194117), added: 196592, mode: `Measured`) + /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:0 w:1) + /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:0 w:1) + /// Proof: 
`MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `Measured`) + fn on_initialize_valid_non_terminal() -> Weight { + // Proof Size summary in bytes: + // Measured: `60247` + // Estimated: `63712` + // Minimum execution time: 714_425_000 picoseconds. + Weight::from_parts(828_435_000, 63712) + .saturating_add(T::DbWeight::get().reads(9_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:64 w:64) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(17279), added: 19754, mode: `Measured`) + /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(194117), added: 196592, mode: `Measured`) + /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:1) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` 
(`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:65 w:64) + /// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:1 w:1) + /// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(214), added: 2689, mode: `Measured`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:0 w:1) + /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `Measured`) + fn on_initialize_valid_terminal() -> Weight { + // Proof Size summary in bytes: + // Measured: `1607485` + // Estimated: `1769350` + // Minimum execution time: 7_054_514_000 picoseconds. 
+ Weight::from_parts(7_256_456_000, 1769350) + .saturating_add(T::DbWeight::get().reads(140_u64)) + .saturating_add(T::DbWeight::get().writes(136_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:64 w:64) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(17279), added: 19754, mode: `Measured`) + /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(194117), added: 196592, mode: `Measured`) + /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:65 w:64) + /// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `Measured`) + /// Storage: 
`MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(214), added: 2689, mode: `Measured`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:63 w:64) + /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `Measured`) + fn on_initialize_invalid_terminal() -> Weight { + // Proof Size summary in bytes: + // Measured: `1609959` + // Estimated: `1771824` + // Minimum execution time: 7_130_125_000 picoseconds. + Weight::from_parts(7_169_825_000, 1771824) + .saturating_add(T::DbWeight::get().reads(202_u64)) + .saturating_add(T::DbWeight::get().writes(197_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:64 w:64) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(17279), added: 19754, mode: `Measured`) + /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 
w:0) + /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(194117), added: 196592, mode: `Measured`) + /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:63 w:63) + /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:63 w:63) + /// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(214), added: 2689, mode: `Measured`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`) + /// The range of component `v` is `[0, 63]`. + fn on_initialize_invalid_non_terminal(v: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `220123 + v * (402 ±0)` + // Estimated: `291348 + v * (2703 ±23)` + // Minimum execution time: 743_255_000 picoseconds. 
+ Weight::from_parts(1_083_317_746, 291348) + // Standard Error: 3_705_126 + .saturating_add(Weight::from_parts(4_376_779, 0).saturating_mul(v.into())) + .saturating_add(T::DbWeight::get().reads(74_u64)) + .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(v.into()))) + .saturating_add(T::DbWeight::get().writes(69_u64)) + .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(v.into()))) + .saturating_add(Weight::from_parts(0, 2703).saturating_mul(v.into())) + } +} + +// For backwards compatibility and tests. +impl WeightInfo for () { + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:0) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:1 w:0) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(17279), added: 19754, mode: `Measured`) + /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(194117), added: 196592, mode: `Measured`) + /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: 
`MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:0 w:1) + /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:0 w:1) + /// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `Measured`) + fn on_initialize_valid_non_terminal() -> Weight { + // Proof Size summary in bytes: + // Measured: `60247` + // Estimated: `63712` + // Minimum execution time: 714_425_000 picoseconds. + Weight::from_parts(828_435_000, 63712) + .saturating_add(RocksDbWeight::get().reads(9_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:64 w:64) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(17279), added: 19754, mode: `Measured`) + /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) + /// Storage: 
`MultiBlock::PagedVoterSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(194117), added: 196592, mode: `Measured`) + /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:1) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:65 w:64) + /// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:1 w:1) + /// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(214), added: 2689, mode: `Measured`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:0 w:1) + /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `Measured`) + fn on_initialize_valid_terminal() -> Weight { + // Proof Size summary in bytes: + // Measured: `1607485` + // Estimated: `1769350` + // Minimum execution time: 7_054_514_000 picoseconds. 
+ Weight::from_parts(7_256_456_000, 1769350) + .saturating_add(RocksDbWeight::get().reads(140_u64)) + .saturating_add(RocksDbWeight::get().writes(136_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:64 w:64) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(17279), added: 19754, mode: `Measured`) + /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(194117), added: 196592, mode: `Measured`) + /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:65 w:64) + /// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `Measured`) + /// Storage: 
`MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(214), added: 2689, mode: `Measured`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:63 w:64) + /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `Measured`) + fn on_initialize_invalid_terminal() -> Weight { + // Proof Size summary in bytes: + // Measured: `1609959` + // Estimated: `1771824` + // Minimum execution time: 7_130_125_000 picoseconds. + Weight::from_parts(7_169_825_000, 1771824) + .saturating_add(RocksDbWeight::get().reads(202_u64)) + .saturating_add(RocksDbWeight::get().writes(197_u64)) + } + /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) + /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) + /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) + /// Storage: `MultiBlock::Round` (r:1 w:0) + /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) + /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionStorage` (r:64 w:64) + /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(17279), added: 19754, mode: `Measured`) + /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) + /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) + /// Storage: `MultiBlock::PagedVoterSnapshot` 
(r:1 w:0) + /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(194117), added: 196592, mode: `Measured`) + /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) + /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) + /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:63 w:63) + /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `Measured`) + /// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:63 w:63) + /// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `Measured`) + /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) + /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(214), added: 2689, mode: `Measured`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`) + /// The range of component `v` is `[0, 63]`. + fn on_initialize_invalid_non_terminal(v: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `220123 + v * (402 ±0)` + // Estimated: `291348 + v * (2703 ±23)` + // Minimum execution time: 743_255_000 picoseconds. 
+ Weight::from_parts(1_083_317_746, 291348) + // Standard Error: 3_705_126 + .saturating_add(Weight::from_parts(4_376_779, 0).saturating_mul(v.into())) + .saturating_add(RocksDbWeight::get().reads(74_u64)) + .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(v.into()))) + .saturating_add(RocksDbWeight::get().writes(69_u64)) + .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(v.into()))) + .saturating_add(Weight::from_parts(0, 2703).saturating_mul(v.into())) + } +} diff --git a/substrate/frame/election-provider-multi-block/src/weights/zero.rs b/substrate/frame/election-provider-multi-block/src/weights/zero.rs new file mode 100644 index 0000000000000..256f86890531c --- /dev/null +++ b/substrate/frame/election-provider-multi-block/src/weights/zero.rs @@ -0,0 +1,98 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! A set of zero weights for all benchmarks of this pallet to be temporarily used in testing +//! runtimes while benchmarking is being finalized. 
+ +/// A `WeightInfo` impl with all zero weights +pub struct AllZeroWeights; +use frame_support::weights::Weight; + +impl crate::WeightInfo for AllZeroWeights { + fn export_non_terminal() -> Weight { + Default::default() + } + fn export_terminal() -> Weight { + Default::default() + } + fn manage() -> Weight { + Default::default() + } + fn on_initialize_into_signed() -> Weight { + Default::default() + } + fn on_initialize_into_signed_validation() -> Weight { + Default::default() + } + fn on_initialize_into_snapshot_msp() -> Weight { + Default::default() + } + fn on_initialize_into_snapshot_rest() -> Weight { + Default::default() + } + fn on_initialize_into_unsigned() -> Weight { + Default::default() + } + fn on_initialize_nothing() -> Weight { + Default::default() + } +} + +impl crate::signed::WeightInfo for AllZeroWeights { + fn bail() -> Weight { + Default::default() + } + fn register_eject() -> Weight { + Default::default() + } + fn register_not_full() -> Weight { + Default::default() + } + fn submit_page() -> Weight { + Default::default() + } + fn unset_page() -> Weight { + Default::default() + } + fn clear_old_round_data(p: u32) -> Weight { + Default::default() + } +} + +impl crate::unsigned::WeightInfo for AllZeroWeights { + fn submit_unsigned() -> Weight { + Default::default() + } + fn validate_unsigned() -> Weight { + Default::default() + } +} + +impl crate::verifier::WeightInfo for AllZeroWeights { + fn on_initialize_invalid_non_terminal(_: u32) -> Weight { + Default::default() + } + fn on_initialize_invalid_terminal() -> Weight { + Default::default() + } + fn on_initialize_valid_non_terminal() -> Weight { + Default::default() + } + fn on_initialize_valid_terminal() -> Weight { + Default::default() + } +} diff --git a/substrate/frame/election-provider-multi-phase/src/benchmarking.rs b/substrate/frame/election-provider-multi-phase/src/benchmarking.rs index 222e79ab99c6c..a2289195fd662 100644 --- 
a/substrate/frame/election-provider-multi-phase/src/benchmarking.rs +++ b/substrate/frame/election-provider-multi-phase/src/benchmarking.rs @@ -288,9 +288,11 @@ mod benchmarks { ) -> Result<(), BenchmarkError> { // We don't directly need the data-provider to be populated, but it is just easy to use it. set_up_data_provider::(v, t); - // Default bounds are unbounded. - let targets = T::DataProvider::electable_targets(DataProviderBounds::default())?; - let voters = T::DataProvider::electing_voters(DataProviderBounds::default())?; + // default bounds are unbounded. + let targets = + T::DataProvider::electable_targets(DataProviderBounds::default(), Zero::zero())?; + let voters = T::DataProvider::electing_voters(DataProviderBounds::default(), Zero::zero())?; + let desired_targets = T::DataProvider::desired_targets()?; assert!(Snapshot::::get().is_none()); @@ -343,7 +345,7 @@ mod benchmarks { #[block] { - result = as ElectionProvider>::elect(); + result = as ElectionProvider>::elect(Zero::zero()); } assert!(result.is_ok()); diff --git a/substrate/frame/election-provider-multi-phase/src/lib.rs b/substrate/frame/election-provider-multi-phase/src/lib.rs index 57bb54cc54551..33e7777665762 100644 --- a/substrate/frame/election-provider-multi-phase/src/lib.rs +++ b/substrate/frame/election-provider-multi-phase/src/lib.rs @@ -189,6 +189,18 @@ //! Note that there could be an overlap between these sub-errors. For example, A //! `SnapshotUnavailable` can happen in both miner and feasibility check phase. //! +//! ## Multi-page election support +//! +//! The [`frame_election_provider_support::ElectionDataProvider`] and +//! [`frame_election_provider_support::ElectionProvider`] traits used by this pallet can support a +//! multi-page election. +//! +//! However, this pallet only supports single-page election and data +//! provider and all the relevant trait implementation and configurations reflect that assumption. +//! +//! 
If external callers request the election of a page index higher than 0, the election will fail +//! with [`ElectionError::MultiPageNotSupported`]. +//! //! ## Future Plans //! //! **Emergency-phase recovery script**: This script should be taken out of staking-miner in @@ -234,14 +246,14 @@ extern crate alloc; use alloc::{boxed::Box, vec::Vec}; use codec::{Decode, DecodeWithMemTracking, Encode}; use frame_election_provider_support::{ - bounds::{CountBound, ElectionBounds, ElectionBoundsBuilder, SizeBound}, - BoundedSupportsOf, DataProviderBounds, ElectionDataProvider, ElectionProvider, - ElectionProviderBase, InstantElectionProvider, NposSolution, + bounds::{CountBound, ElectionBounds, SizeBound}, + BoundedSupports, BoundedSupportsOf, ElectionDataProvider, ElectionProvider, + InstantElectionProvider, NposSolution, PageIndex, }; use frame_support::{ dispatch::DispatchClass, ensure, - traits::{Currency, DefensiveResult, Get, OnUnbalanced, ReservableCurrency}, + traits::{Currency, Get, OnUnbalanced, ReservableCurrency}, weights::Weight, DefaultNoBound, EqNoBound, PartialEqNoBound, }; @@ -251,7 +263,7 @@ use sp_arithmetic::{ traits::{CheckedAdd, Zero}, UpperOf, }; -use sp_npos_elections::{BoundedSupports, ElectionScore, IdentifierT, Supports, VoteWeight}; +use sp_npos_elections::{ElectionScore, IdentifierT, Supports, VoteWeight}; use sp_runtime::{ transaction_validity::{ InvalidTransaction, TransactionPriority, TransactionSource, TransactionValidity, @@ -270,6 +282,8 @@ mod mock; #[macro_use] pub mod helpers; +/// This pallet only supports a single page election flow. +pub(crate) const SINGLE_PAGE: u32 = 0; const LOG_TARGET: &str = "runtime::election-provider"; pub mod migrations; @@ -287,7 +301,6 @@ pub use weights::WeightInfo; /// The solution type used by this crate. pub type SolutionOf = ::Solution; - /// The voter index. Derived from [`SolutionOf`]. pub type SolutionVoterIndexOf = as NposSolution>::VoterIndex; /// The target index. Derived from [`SolutionOf`]. 
@@ -295,8 +308,14 @@ pub type SolutionTargetIndexOf = as NposSolution>::TargetIndex /// The accuracy of the election, when submitted from offchain. Derived from [`SolutionOf`]. pub type SolutionAccuracyOf = ::MinerConfig> as NposSolution>::Accuracy; +/// A ready solution parameterized with this pallet's miner config. +pub type ReadySolutionOf = ReadySolution< + ::AccountId, + ::MaxWinners, + ::MaxBackersPerWinner, +>; /// The fallback election type. -pub type FallbackErrorOf = <::Fallback as ElectionProviderBase>::Error; +pub type FallbackErrorOf = <::Fallback as ElectionProvider>::Error; /// Configuration for the benchmarks of the pallet. pub trait BenchmarkingConfig { @@ -444,17 +463,18 @@ impl Default for RawSolution { DefaultNoBound, scale_info::TypeInfo, )] -#[scale_info(skip_type_params(AccountId, MaxWinners))] -pub struct ReadySolution +#[scale_info(skip_type_params(AccountId, MaxWinners, MaxBackersPerWinner))] +pub struct ReadySolution where AccountId: IdentifierT, MaxWinners: Get, + MaxBackersPerWinner: Get, { /// The final supports of the solution. /// /// This is target-major vector, storing each winners, total backing, and each individual /// backer. - pub supports: BoundedSupports, + pub supports: BoundedSupports, /// The score of the solution. /// /// This is needed to potentially challenge the solution. @@ -507,13 +527,15 @@ pub enum ElectionError { DataProvider(&'static str), /// An error nested in the fallback. Fallback(FallbackErrorOf), + /// An error occurred when requesting an election result. The caller expects a multi-paged + /// election, which this pallet does not support. + MultiPageNotSupported, /// No solution has been queued. NothingQueued, } // NOTE: we have to do this manually because of the additional where clause needed on // `FallbackErrorOf`. 
-#[cfg(test)] impl PartialEq for ElectionError where FallbackErrorOf: PartialEq, @@ -525,6 +547,8 @@ where (Miner(x), Miner(y)) if x == y => true, (DataProvider(x), DataProvider(y)) if x == y => true, (Fallback(x), Fallback(y)) if x == y => true, + (MultiPageNotSupported, MultiPageNotSupported) => true, + (NothingQueued, NothingQueued) => true, _ => false, } } @@ -630,6 +654,7 @@ pub mod pallet { AccountId = Self::AccountId, MaxVotesPerVoter = ::MaxVotesPerVoter, MaxWinners = Self::MaxWinners, + MaxBackersPerWinner = Self::MaxBackersPerWinner, >; /// Maximum number of signed submissions that can be queued. @@ -666,20 +691,23 @@ pub mod pallet { #[pallet::constant] type SignedDepositWeight: Get>; - /// The maximum number of winners that can be elected by this `ElectionProvider` - /// implementation. + /// Maximum number of winners that an election supports. /// /// Note: This must always be greater or equal to `T::DataProvider::desired_targets()`. #[pallet::constant] type MaxWinners: Get; + /// Maximum number of voters that can support a winner in an election solution. + /// + /// This is needed to ensure election computation is bounded. + #[pallet::constant] + type MaxBackersPerWinner: Get; + /// Something that calculates the signed deposit base based on the signed submissions queue /// size. type SignedDepositBase: Convert>; /// The maximum number of electing voters and electable targets to put in the snapshot. - /// At the moment, snapshots are only over a single block, but once multi-block elections - /// are introduced they will take place over multiple blocks. type ElectionBounds: Get; /// Handler for the slashed deposits. @@ -699,7 +727,8 @@ pub mod pallet { AccountId = Self::AccountId, BlockNumber = BlockNumberFor, DataProvider = Self::DataProvider, - MaxWinners = Self::MaxWinners, + MaxBackersPerWinner = Self::MaxBackersPerWinner, + MaxWinnersPerPage = Self::MaxWinners, >; /// Configuration of the governance-only fallback. 
@@ -710,7 +739,8 @@ pub mod pallet { AccountId = Self::AccountId, BlockNumber = BlockNumberFor, DataProvider = Self::DataProvider, - MaxWinners = Self::MaxWinners, + MaxWinnersPerPage = Self::MaxWinners, + MaxBackersPerWinner = Self::MaxBackersPerWinner, >; /// OCW election solution miner algorithm implementation. @@ -764,9 +794,10 @@ pub mod pallet { log!( trace, - "current phase {:?}, next election {:?}, metadata: {:?}", + "current phase {:?}, next election {:?}, queued? {:?}, metadata: {:?}", current_phase, next_election, + QueuedSolution::::get().map(|rs| (rs.supports.len(), rs.compute, rs.score)), SnapshotMetadata::::get() ); match current_phase { @@ -992,8 +1023,9 @@ pub mod pallet { T::ForceOrigin::ensure_origin(origin)?; ensure!(CurrentPhase::::get().is_emergency(), Error::::CallNotAllowed); - // bound supports with T::MaxWinners - let supports = supports.try_into().map_err(|_| Error::::TooManyWinners)?; + // bound supports with T::MaxWinners. + let supports: BoundedSupportsOf = + supports.try_into().map_err(|_| Error::::TooManyWinners)?; // Note: we don't `rotate_round` at this point; the next call to // `ElectionProvider::elect` will succeed and take care of that. @@ -1096,35 +1128,21 @@ pub mod pallet { /// calling [`Call::set_emergency_election_result`]. 
#[pallet::call_index(4)] #[pallet::weight(T::DbWeight::get().reads_writes(1, 1))] - pub fn governance_fallback( - origin: OriginFor, - maybe_max_voters: Option, - maybe_max_targets: Option, - ) -> DispatchResult { + pub fn governance_fallback(origin: OriginFor) -> DispatchResult { T::ForceOrigin::ensure_origin(origin)?; ensure!(CurrentPhase::::get().is_emergency(), Error::::CallNotAllowed); - let election_bounds = ElectionBoundsBuilder::default() - .voters_count(maybe_max_voters.unwrap_or(u32::MAX).into()) - .targets_count(maybe_max_targets.unwrap_or(u32::MAX).into()) - .build(); + let RoundSnapshot { voters, targets } = + Snapshot::::get().ok_or(Error::::MissingSnapshotMetadata)?; + let desired_targets = + DesiredTargets::::get().ok_or(Error::::MissingSnapshotMetadata)?; - let supports = T::GovernanceFallback::instant_elect( - election_bounds.voters, - election_bounds.targets, - ) - .map_err(|e| { + let supports = T::GovernanceFallback::instant_elect(voters, targets, desired_targets) + .map_err(|e| { log!(error, "GovernanceFallback failed: {:?}", e); Error::::FallbackFailed })?; - // transform BoundedVec<_, T::GovernanceFallback::MaxWinners> into - // `BoundedVec<_, T::MaxWinners>` - let supports: BoundedVec<_, T::MaxWinners> = supports - .into_inner() - .try_into() - .defensive_map_err(|_| Error::::BoundNotMet)?; - let solution = ReadySolution { supports, score: Default::default(), @@ -1279,8 +1297,7 @@ pub mod pallet { /// /// Always sorted by score. #[pallet::storage] - pub type QueuedSolution = - StorageValue<_, ReadySolution>; + pub type QueuedSolution = StorageValue<_, ReadySolutionOf>; /// Snapshot data of the round. /// @@ -1412,7 +1429,7 @@ impl Pallet { /// Current best solution, signed or unsigned, queued to be returned upon `elect`. /// /// Always sorted by score. 
- pub fn queued_solution() -> Option> { + pub fn queued_solution() -> Option> { QueuedSolution::::get() } @@ -1518,11 +1535,12 @@ impl Pallet { /// Parts of [`create_snapshot`] that happen outside of this pallet. /// /// Extracted for easier weight calculation. + /// + /// Note: this pallet only supports one page of voter and target snapshots. fn create_snapshot_external( ) -> Result<(Vec, Vec>, u32), ElectionError> { let election_bounds = T::ElectionBounds::get(); - - let targets = T::DataProvider::electable_targets(election_bounds.targets) + let targets = T::DataProvider::electable_targets_stateless(election_bounds.targets) .and_then(|t| { election_bounds.ensure_targets_limits( CountBound(t.len() as u32), @@ -1532,7 +1550,7 @@ impl Pallet { }) .map_err(ElectionError::DataProvider)?; - let voters = T::DataProvider::electing_voters(election_bounds.voters) + let voters = T::DataProvider::electing_voters_stateless(election_bounds.voters) .and_then(|v| { election_bounds.ensure_voters_limits( CountBound(v.len() as u32), @@ -1542,7 +1560,7 @@ impl Pallet { }) .map_err(ElectionError::DataProvider)?; - let mut desired_targets = as ElectionProviderBase>::desired_targets_checked() + let mut desired_targets = as ElectionProvider>::desired_targets_checked() .map_err(|e| ElectionError::DataProvider(e))?; // If `desired_targets` > `targets.len()`, cap `desired_targets` to that level and emit a @@ -1597,7 +1615,7 @@ impl Pallet { pub fn feasibility_check( raw_solution: RawSolution>, compute: ElectionCompute, - ) -> Result, FeasibilityError> { + ) -> Result, FeasibilityError> { let desired_targets = DesiredTargets::::get().ok_or(FeasibilityError::SnapshotUnavailable)?; @@ -1644,40 +1662,46 @@ impl Pallet { QueuedSolution::::take() .ok_or(ElectionError::::NothingQueued) .or_else(|_| { - // default data provider bounds are unbounded. calling `instant_elect` with - // unbounded data provider bounds means that the on-chain `T:Bounds` configs will - // *not* be overwritten. 
- T::Fallback::instant_elect( - DataProviderBounds::default(), - DataProviderBounds::default(), - ) - .map_err(|fe| ElectionError::Fallback(fe)) - .and_then(|supports| { - Ok(ReadySolution { - supports, - score: Default::default(), - compute: ElectionCompute::Fallback, + log!(warn, "No solution queued, falling back to instant fallback.",); + + #[cfg(feature = "runtime-benchmarks")] + Self::asap(); + + let (voters, targets, desired_targets) = if T::Fallback::bother() { + let RoundSnapshot { voters, targets } = Snapshot::::get().ok_or( + ElectionError::::Feasibility(FeasibilityError::SnapshotUnavailable), + )?; + let desired_targets = DesiredTargets::::get().ok_or( + ElectionError::::Feasibility(FeasibilityError::SnapshotUnavailable), + )?; + (voters, targets, desired_targets) + } else { + (Default::default(), Default::default(), Default::default()) + }; + T::Fallback::instant_elect(voters, targets, desired_targets) + .map_err(|fe| ElectionError::Fallback(fe)) + .and_then(|supports| { + Ok(ReadySolution { + supports, + score: Default::default(), + compute: ElectionCompute::Fallback, + }) }) - }) }) .map(|ReadySolution { compute, score, supports }| { Self::deposit_event(Event::ElectionFinalized { compute, score }); - if Round::::get() != 1 { - log!(info, "Finalized election round with compute {:?}.", compute); - } + log!(info, "Finalized election round with compute {:?}.", compute); supports }) .map_err(|err| { Self::deposit_event(Event::ElectionFailed); - if Round::::get() != 1 { - log!(warn, "Failed to finalize election round. reason {:?}", err); - } + log!(warn, "Failed to finalize election round. reason {:?}", err); err }) } /// record the weight of the given `supports`. 
- fn weigh_supports(supports: &Supports) { + fn weigh_supports(supports: &BoundedSupportsOf) { let active_voters = supports .iter() .map(|(_, x)| x) @@ -1769,35 +1793,70 @@ impl Pallet { } } -impl ElectionProviderBase for Pallet { +impl ElectionProvider for Pallet { type AccountId = T::AccountId; type BlockNumber = BlockNumberFor; type Error = ElectionError; - type MaxWinners = T::MaxWinners; + type MaxWinnersPerPage = T::MaxWinners; + type MaxBackersPerWinner = T::MaxBackersPerWinner; + type Pages = sp_core::ConstU32<1>; type DataProvider = T::DataProvider; -} -impl ElectionProvider for Pallet { - fn ongoing() -> bool { - match CurrentPhase::::get() { - Phase::Off => false, - _ => true, - } - } + fn elect(page: PageIndex) -> Result, Self::Error> { + // Note: this pallet **MUST** only by used in the single-page mode. + ensure!(page == SINGLE_PAGE, ElectionError::::MultiPageNotSupported); - fn elect() -> Result, Self::Error> { - match Self::do_elect() { - Ok(supports) => { + let res = match Self::do_elect() { + Ok(bounded_supports) => { // All went okay, record the weight, put sign to be Off, clean snapshot, etc. 
- Self::weigh_supports(&supports); + Self::weigh_supports(&bounded_supports); Self::rotate_round(); - Ok(supports) + Ok(bounded_supports) }, Err(why) => { log!(error, "Entering emergency mode: {:?}", why); Self::phase_transition(Phase::Emergency); Err(why) }, + }; + + log!(info, "ElectionProvider::elect({}) => {:?}", page, res.as_ref().map(|s| s.len())); + res + } + + fn duration() -> Self::BlockNumber { + let signed: BlockNumberFor = T::SignedPhase::get().saturated_into(); + let unsigned: BlockNumberFor = T::UnsignedPhase::get().saturated_into(); + signed + unsigned + } + + fn start() -> Result<(), Self::Error> { + log!( + warn, + "we received signal, but this pallet works in the basis of legacy pull based election" + ); + Ok(()) + } + + fn status() -> Result { + let has_queued = QueuedSolution::::exists(); + let phase = CurrentPhase::::get(); + match (phase, has_queued) { + (Phase::Unsigned(_), true) => Ok(true), + (Phase::Off, _) => Err(()), + _ => Ok(false), + } + } + + #[cfg(feature = "runtime-benchmarks")] + fn asap() { + // prepare our snapshot so we can "hopefully" run a fallback. 
+ if !Snapshot::::exists() { + Self::create_snapshot() + .inspect_err(|e| { + crate::log!(error, "failed to create snapshot while asap-preparing: {:?}", e) + }) + .unwrap() } } } @@ -2015,12 +2074,13 @@ mod tests { use super::*; use crate::{ mock::{ - multi_phase_events, raw_solution, roll_to, roll_to_signed, roll_to_unsigned, AccountId, + multi_phase_events, raw_solution, roll_to, roll_to_signed, roll_to_unsigned, ElectionsBounds, ExtBuilder, MockWeightInfo, MockedWeightInfo, MultiPhase, Runtime, - RuntimeOrigin, SignedMaxSubmissions, System, TargetIndex, Targets, Voters, + RuntimeOrigin, SignedMaxSubmissions, System, Voters, }, Phase, }; + use frame_election_provider_support::bounds::ElectionBoundsBuilder; use frame_support::{assert_noop, assert_ok}; use sp_npos_elections::{BalancingConfig, Support}; @@ -2082,7 +2142,7 @@ mod tests { assert_eq!(CurrentPhase::::get(), Phase::Unsigned((true, 25))); assert!(Snapshot::::get().is_some()); - assert_ok!(MultiPhase::elect()); + assert_ok!(MultiPhase::elect(SINGLE_PAGE)); assert!(CurrentPhase::::get().is_off()); assert!(Snapshot::::get().is_none()); @@ -2146,7 +2206,7 @@ mod tests { roll_to(30); assert!(CurrentPhase::::get().is_unsigned_open_at(20)); - assert_ok!(MultiPhase::elect()); + assert_ok!(MultiPhase::elect(SINGLE_PAGE)); assert!(CurrentPhase::::get().is_off()); assert!(Snapshot::::get().is_none()); @@ -2193,7 +2253,7 @@ mod tests { roll_to(30); assert!(CurrentPhase::::get().is_signed()); - assert_ok!(MultiPhase::elect()); + assert_ok!(MultiPhase::elect(SINGLE_PAGE)); assert!(CurrentPhase::::get().is_off()); assert!(Snapshot::::get().is_none()); @@ -2216,43 +2276,6 @@ mod tests { }); } - #[test] - fn both_phases_void() { - ExtBuilder::default().phases(0, 0).build_and_execute(|| { - roll_to(15); - assert!(CurrentPhase::::get().is_off()); - - roll_to(19); - assert!(CurrentPhase::::get().is_off()); - - roll_to(20); - assert!(CurrentPhase::::get().is_off()); - - roll_to(30); - assert!(CurrentPhase::::get().is_off()); 
- - // This module is now only capable of doing on-chain backup. - assert_ok!(MultiPhase::elect()); - - assert!(CurrentPhase::::get().is_off()); - - assert_eq!( - multi_phase_events(), - vec![ - Event::ElectionFinalized { - compute: ElectionCompute::Fallback, - score: ElectionScore { - minimal_stake: 0, - sum_stake: 0, - sum_stake_squared: 0 - } - }, - Event::PhaseTransitioned { from: Phase::Off, to: Phase::Off, round: 2 }, - ] - ); - }); - } - #[test] fn early_termination() { // An early termination in the signed phase, with no queued solution. @@ -2268,7 +2291,7 @@ mod tests { assert_eq!(Round::::get(), 1); // An unexpected call to elect. - assert_ok!(MultiPhase::elect()); + assert_ok!(MultiPhase::elect(SINGLE_PAGE)); // We surely can't have any feasible solutions. This will cause an on-chain election. assert_eq!( @@ -2319,7 +2342,7 @@ mod tests { } // an unexpected call to elect. - assert_ok!(MultiPhase::elect()); + assert_ok!(MultiPhase::elect(SINGLE_PAGE)); // all storage items must be cleared. assert_eq!(Round::::get(), 2); @@ -2390,7 +2413,7 @@ mod tests { )); roll_to(30); - assert_ok!(MultiPhase::elect()); + assert_ok!(MultiPhase::elect(SINGLE_PAGE)); assert_eq!( multi_phase_events(), @@ -2447,7 +2470,7 @@ mod tests { )); assert!(QueuedSolution::::get().is_some()); - assert_ok!(MultiPhase::elect()); + assert_ok!(MultiPhase::elect(SINGLE_PAGE)); assert_eq!( multi_phase_events(), @@ -2481,6 +2504,35 @@ mod tests { }) } + #[test] + fn try_elect_multi_page_fails() { + let prepare_election = || { + roll_to_signed(); + assert!(Snapshot::::get().is_some()); + + // submit solution and assert it is queued and ready for elect to be called. 
+ let (solution, _, _) = MultiPhase::mine_solution().unwrap(); + assert_ok!(MultiPhase::submit( + crate::mock::RuntimeOrigin::signed(99), + Box::new(solution), + )); + roll_to(30); + assert!(QueuedSolution::::get().is_some()); + }; + + ExtBuilder::default().onchain_fallback(false).build_and_execute(|| { + prepare_election(); + // single page elect call works as expected. + assert_ok!(MultiPhase::elect(SINGLE_PAGE)); + }); + + ExtBuilder::default().onchain_fallback(false).build_and_execute(|| { + prepare_election(); + // multi page calls will fail with multi-page not supported error. + assert_noop!(MultiPhase::elect(SINGLE_PAGE + 1), ElectionError::MultiPageNotSupported); + }) + } + #[test] fn fallback_strategy_works() { ExtBuilder::default().onchain_fallback(true).build_and_execute(|| { @@ -2489,15 +2541,16 @@ mod tests { // Zilch solutions thus far, but we get a result. assert!(QueuedSolution::::get().is_none()); - let supports = MultiPhase::elect().unwrap(); + let supports = MultiPhase::elect(SINGLE_PAGE).unwrap(); - assert_eq!( - supports, - vec![ - (30, Support { total: 40, voters: vec![(2, 5), (4, 5), (30, 30)] }), - (40, Support { total: 60, voters: vec![(2, 5), (3, 10), (4, 5), (40, 40)] }) - ] - ); + let expected_supports = vec![ + (30, Support { total: 40, voters: vec![(2, 5), (4, 5), (30, 30)] }), + (40, Support { total: 60, voters: vec![(2, 5), (3, 10), (4, 5), (40, 40)] }), + ] + .try_into() + .unwrap(); + + assert_eq!(supports, expected_supports); assert_eq!( multi_phase_events(), @@ -2531,7 +2584,10 @@ mod tests { // Zilch solutions thus far. assert!(QueuedSolution::::get().is_none()); - assert_eq!(MultiPhase::elect().unwrap_err(), ElectionError::Fallback("NoFallback.")); + assert_eq!( + MultiPhase::elect(SINGLE_PAGE).unwrap_err(), + ElectionError::Fallback("NoFallback.") + ); // phase is now emergency. assert_eq!(CurrentPhase::::get(), Phase::Emergency); // snapshot is still there until election finalizes. 
@@ -2565,7 +2621,10 @@ mod tests { // Zilch solutions thus far. assert!(QueuedSolution::::get().is_none()); - assert_eq!(MultiPhase::elect().unwrap_err(), ElectionError::Fallback("NoFallback.")); + assert_eq!( + MultiPhase::elect(SINGLE_PAGE).unwrap_err(), + ElectionError::Fallback("NoFallback.") + ); // phase is now emergency. assert_eq!(CurrentPhase::::get(), Phase::Emergency); @@ -2574,16 +2633,16 @@ mod tests { // no single account can trigger this assert_noop!( - MultiPhase::governance_fallback(RuntimeOrigin::signed(99), None, None), + MultiPhase::governance_fallback(RuntimeOrigin::signed(99)), DispatchError::BadOrigin ); // only root can - assert_ok!(MultiPhase::governance_fallback(RuntimeOrigin::root(), None, None)); + assert_ok!(MultiPhase::governance_fallback(RuntimeOrigin::root())); // something is queued now assert!(QueuedSolution::::get().is_some()); // next election call with fix everything.; - assert!(MultiPhase::elect().is_ok()); + assert!(MultiPhase::elect(SINGLE_PAGE).is_ok()); assert_eq!(CurrentPhase::::get(), Phase::Off); assert_eq!( @@ -2616,78 +2675,6 @@ mod tests { }) } - #[test] - fn snapshot_too_big_failure_onchain_fallback() { - // the `MockStaking` is designed such that if it has too many targets, it simply fails. - ExtBuilder::default().build_and_execute(|| { - // sets bounds on number of targets. - let new_bounds = ElectionBoundsBuilder::default().targets_count(1_000.into()).build(); - ElectionsBounds::set(new_bounds); - - Targets::set((0..(1_000 as AccountId) + 1).collect::>()); - - // Signed phase failed to open. - roll_to(15); - assert_eq!(CurrentPhase::::get(), Phase::Off); - - // Unsigned phase failed to open. - roll_to(25); - assert_eq!(CurrentPhase::::get(), Phase::Off); - - // On-chain backup works though. 
- let supports = MultiPhase::elect().unwrap(); - assert!(supports.len() > 0); - - assert_eq!( - multi_phase_events(), - vec![ - Event::ElectionFinalized { - compute: ElectionCompute::Fallback, - score: ElectionScore { - minimal_stake: 0, - sum_stake: 0, - sum_stake_squared: 0 - } - }, - Event::PhaseTransitioned { from: Phase::Off, to: Phase::Off, round: 2 }, - ] - ); - }); - } - - #[test] - fn snapshot_too_big_failure_no_fallback() { - // and if the backup mode is nothing, we go into the emergency mode.. - ExtBuilder::default().onchain_fallback(false).build_and_execute(|| { - // sets bounds on number of targets. - let new_bounds = ElectionBoundsBuilder::default().targets_count(1_000.into()).build(); - ElectionsBounds::set(new_bounds); - - Targets::set((0..(TargetIndex::max_value() as AccountId) + 1).collect::>()); - - // Signed phase failed to open. - roll_to(15); - assert_eq!(CurrentPhase::::get(), Phase::Off); - - // Unsigned phase failed to open. - roll_to(25); - assert_eq!(CurrentPhase::::get(), Phase::Off); - - roll_to(29); - let err = MultiPhase::elect().unwrap_err(); - assert_eq!(err, ElectionError::Fallback("NoFallback.")); - assert_eq!(CurrentPhase::::get(), Phase::Emergency); - - assert_eq!( - multi_phase_events(), - vec![ - Event::ElectionFailed, - Event::PhaseTransitioned { from: Phase::Off, to: Phase::Emergency, round: 1 } - ] - ); - }); - } - #[test] fn snapshot_too_big_truncate() { // but if there are too many voters, we simply truncate them. 
diff --git a/substrate/frame/election-provider-multi-phase/src/mock.rs b/substrate/frame/election-provider-multi-phase/src/mock.rs index d0797e100fcdf..c23d226f84780 100644 --- a/substrate/frame/election-provider-multi-phase/src/mock.rs +++ b/substrate/frame/election-provider-multi-phase/src/mock.rs @@ -18,7 +18,7 @@ use super::*; use crate::{self as multi_phase, signed::GeometricDepositBase, unsigned::MinerConfig}; use frame_election_provider_support::{ - bounds::{DataProviderBounds, ElectionBounds}, + bounds::{DataProviderBounds, ElectionBounds, ElectionBoundsBuilder}, data_provider, onchain, ElectionDataProvider, NposSolution, SequentialPhragmen, }; pub use frame_support::derive_impl; @@ -35,7 +35,7 @@ use sp_core::{ testing::{PoolState, TestOffchainExt, TestTransactionPoolExt}, OffchainDbExt, OffchainWorkerExt, TransactionPoolExt, }, - H256, + ConstBool, H256, }; use sp_npos_elections::{ assignment_ratio_to_staked_normalized, seq_phragmen, to_supports, BalancingConfig, @@ -116,7 +116,7 @@ pub fn roll_to_round(n: u32) { while Round::::get() != n { roll_to_signed(); - frame_support::assert_ok!(MultiPhase::elect()); + frame_support::assert_ok!(MultiPhase::elect(Zero::zero())); } } @@ -296,6 +296,8 @@ parameter_types! { #[derive(Debug)] pub static MaxWinners: u32 = 200; + #[derive(Debug)] + pub static MaxBackersPerWinner: u32 = 200; // `ElectionBounds` and `OnChainElectionsBounds` are defined separately to set them independently in the tests. 
pub static ElectionsBounds: ElectionBounds = ElectionBoundsBuilder::default().build(); pub static OnChainElectionsBounds: ElectionBounds = ElectionBoundsBuilder::default().build(); @@ -309,34 +311,60 @@ impl onchain::Config for OnChainSeqPhragmen { type Solver = SequentialPhragmen, Balancing>; type DataProvider = StakingMock; type WeightInfo = (); - type MaxWinners = MaxWinners; + type MaxWinnersPerPage = MaxWinners; + type MaxBackersPerWinner = MaxBackersPerWinner; + type Sort = ConstBool; type Bounds = OnChainElectionsBounds; } pub struct MockFallback; -impl ElectionProviderBase for MockFallback { - type BlockNumber = BlockNumber; +impl ElectionProvider for MockFallback { type AccountId = AccountId; + type BlockNumber = BlockNumber; type Error = &'static str; + type MaxWinnersPerPage = MaxWinners; + type MaxBackersPerWinner = MaxBackersPerWinner; + type Pages = ConstU32<1>; type DataProvider = StakingMock; - type MaxWinners = MaxWinners; + + fn elect(_remaining: PageIndex) -> Result, Self::Error> { + unimplemented!() + } + + fn duration() -> Self::BlockNumber { + 0 + } + + fn start() -> Result<(), Self::Error> { + Ok(()) + } + + fn status() -> Result { + Ok(true) + } } impl InstantElectionProvider for MockFallback { fn instant_elect( - voters_bounds: DataProviderBounds, - targets_bounds: DataProviderBounds, + voters: Vec>, + targets: Vec, + desired_targets: u32, ) -> Result, Self::Error> { if OnChainFallback::get() { onchain::OnChainExecution::::instant_elect( - voters_bounds, - targets_bounds, + voters, + targets, + desired_targets, ) .map_err(|_| "onchain::OnChainExecution failed.") } else { Err("NoFallback.") } } + + fn bother() -> bool { + OnChainFallback::get() + } } parameter_types! 
{ @@ -362,6 +390,7 @@ impl MinerConfig for Runtime { type MaxWeight = MinerMaxWeight; type MaxVotesPerVoter = ::MaxVotesPerVoter; type MaxWinners = MaxWinners; + type MaxBackersPerWinner = MaxBackersPerWinner; type Solution = TestNposSolution; fn solution_weight(v: u32, t: u32, a: u32, d: u32) -> Weight { @@ -381,7 +410,7 @@ impl MinerConfig for Runtime { impl crate::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Currency = Balances; - type EstimateCallFee = frame_support::traits::ConstU32<8>; + type EstimateCallFee = frame_support::traits::ConstU64<8>; type SignedPhase = SignedPhase; type UnsignedPhase = UnsignedPhase; type BetterSignedThreshold = BetterSignedThreshold; @@ -404,6 +433,7 @@ impl crate::Config for Runtime { frame_election_provider_support::onchain::OnChainExecution; type ForceOrigin = frame_system::EnsureRoot; type MaxWinners = MaxWinners; + type MaxBackersPerWinner = MaxBackersPerWinner; type MinerConfig = Self; type Solver = SequentialPhragmen, Balancing>; type ElectionBounds = ElectionsBounds; @@ -455,7 +485,12 @@ impl ElectionDataProvider for StakingMock { type AccountId = AccountId; type MaxVotesPerVoter = MaxNominations; - fn electable_targets(bounds: DataProviderBounds) -> data_provider::Result> { + fn electable_targets( + bounds: DataProviderBounds, + remaining_pages: PageIndex, + ) -> data_provider::Result> { + assert!(remaining_pages.is_zero()); + let targets = Targets::get(); if !DataProviderAllowBadData::get() && @@ -467,7 +502,12 @@ impl ElectionDataProvider for StakingMock { Ok(targets) } - fn electing_voters(bounds: DataProviderBounds) -> data_provider::Result>> { + fn electing_voters( + bounds: DataProviderBounds, + remaining_pages: PageIndex, + ) -> data_provider::Result>> { + assert!(remaining_pages.is_zero()); + let mut voters = Voters::get(); if !DataProviderAllowBadData::get() { @@ -582,6 +622,10 @@ impl ExtBuilder { ::set(weight); self } + pub fn max_backers_per_winner(self, max: u32) -> Self { + 
MaxBackersPerWinner::set(max); + self + } pub fn build(self) -> sp_io::TestExternalities { sp_tracing::try_init_simple(); let mut storage = diff --git a/substrate/frame/election-provider-multi-phase/src/signed.rs b/substrate/frame/election-provider-multi-phase/src/signed.rs index c685791bbdd9d..5efe848c0e626 100644 --- a/substrate/frame/election-provider-multi-phase/src/signed.rs +++ b/substrate/frame/election-provider-multi-phase/src/signed.rs @@ -21,7 +21,7 @@ use core::marker::PhantomData; use crate::{ unsigned::MinerConfig, Config, ElectionCompute, Pallet, QueuedSolution, RawSolution, - ReadySolution, SignedSubmissionIndices, SignedSubmissionNextIndex, SignedSubmissionsMap, + ReadySolutionOf, SignedSubmissionIndices, SignedSubmissionNextIndex, SignedSubmissionsMap, SnapshotMetadata, SolutionOf, SolutionOrSnapshotSize, Weight, WeightInfo, }; use alloc::{ @@ -490,7 +490,7 @@ impl Pallet { /// /// Infallible pub fn finalize_signed_phase_accept_solution( - ready_solution: ReadySolution, + ready_solution: ReadySolutionOf, who: &T::AccountId, deposit: BalanceOf, call_fee: BalanceOf, @@ -566,9 +566,9 @@ impl Pallet { mod tests { use super::*; use crate::{ - mock::*, CurrentPhase, ElectionBoundsBuilder, ElectionCompute, ElectionError, Error, Event, - Perbill, Phase, Round, + mock::*, CurrentPhase, ElectionCompute, ElectionError, Error, Event, Perbill, Phase, Round, }; + use frame_election_provider_support::bounds::ElectionBoundsBuilder; use frame_support::{assert_noop, assert_ok, assert_storage_noop}; use sp_runtime::Percent; diff --git a/substrate/frame/election-provider-multi-phase/src/unsigned.rs b/substrate/frame/election-provider-multi-phase/src/unsigned.rs index 113df3070c69f..c93e2be5a2fe2 100644 --- a/substrate/frame/election-provider-multi-phase/src/unsigned.rs +++ b/substrate/frame/election-provider-multi-phase/src/unsigned.rs @@ -19,8 +19,8 @@ use crate::{ helpers, Call, Config, CurrentPhase, DesiredTargets, ElectionCompute, Error, FeasibilityError, - 
Pallet, QueuedSolution, RawSolution, ReadySolution, Round, RoundSnapshot, Snapshot, - SolutionAccuracyOf, SolutionOf, SolutionOrSnapshotSize, Weight, + Pallet, QueuedSolution, RawSolution, ReadySolution, ReadySolutionOf, Round, RoundSnapshot, + Snapshot, SolutionAccuracyOf, SolutionOf, SolutionOrSnapshotSize, Weight, }; use alloc::{boxed::Box, vec::Vec}; use codec::Encode; @@ -98,6 +98,8 @@ pub enum MinerError { NoMoreVoters, /// An error from the solver. Solver, + /// Desired targets are more than the maximum allowed winners. + TooManyDesiredTargets, } impl From for MinerError { @@ -112,16 +114,20 @@ } } -/// Reports the trimming result of a mined solution +/// Reports the trimming result of a mined solution. #[derive(Debug, Clone)] pub struct TrimmingStatus { + /// Number of voters trimmed due to the solution weight limits. weight: usize, + /// Number of voters trimmed due to the solution length limits. length: usize, + /// Number of edges (voter -> target) trimmed due to the max backers per winner bound. + edges: usize, } impl TrimmingStatus { pub fn is_trimmed(&self) -> bool { - self.weight > 0 || self.length > 0 + self.weight > 0 || self.length > 0 || self.edges > 0 } pub fn trimmed_weight(&self) -> usize { @@ -131,6 +137,10 @@ pub fn trimmed_length(&self) -> usize { self.length } + + pub fn trimmed_edges(&self) -> usize { + self.edges + } } /// Save a given call into OCW storage. @@ -194,6 +204,7 @@ impl>> Pallet { let RoundSnapshot { voters, targets } = Snapshot::::get().ok_or(MinerError::SnapshotUnAvailable)?; let desired_targets = DesiredTargets::::get().ok_or(MinerError::SnapshotUnAvailable)?; + ensure!(desired_targets <= T::MaxWinners::get(), MinerError::TooManyDesiredTargets); let (solution, score, size, is_trimmed) = Miner::::mine_solution_with_snapshot::( voters, @@ -262,16 +273,17 @@ impl>> Pallet { /// Mine a new solution as a call. Performs all checks.
pub fn mine_checked_call() -> Result, MinerError> { // get the solution, with a load of checks to ensure if submitted, IT IS ABSOLUTELY VALID. - let (raw_solution, witness, _) = Self::mine_and_check()?; + let (raw_solution, witness, _trimming) = Self::mine_and_check()?; let score = raw_solution.score; let call: Call = Call::submit_unsigned { raw_solution: Box::new(raw_solution), witness }; log!( debug, - "mined a solution with score {:?} and size {}", + "mined a solution with score {:?} and size {} and trimming {:?}", score, - call.using_encoded(|b| b.len()) + call.using_encoded(|b| b.len()), + _trimming ); Ok(call) @@ -393,7 +405,7 @@ impl>> Pallet { // ensure score is being improved. Panic henceforth. ensure!( QueuedSolution::::get() - .map_or(true, |q: ReadySolution<_, _>| raw_solution.score > q.score), + .map_or(true, |q: ReadySolution<_, _, _>| raw_solution.score > q.score), Error::::PreDispatchWeakSubmission, ); @@ -427,8 +439,11 @@ pub trait MinerConfig { /// /// The weight is computed using `solution_weight`. type MaxWeight: Get; - /// The maximum number of winners that can be elected. + /// The maximum number of winners that can be elected in the single page supported by this + /// pallet. type MaxWinners: Get; + /// The maximum number of backers per winner in the last solution. + type MaxBackersPerWinner: Get; /// Something that can compute the weight of a solution. /// /// This weight estimate is then used to trim the solution, based on [`MinerConfig::MaxWeight`]. @@ -490,7 +505,11 @@ impl Miner { let ElectionResult { assignments, winners: _ } = election_result; - // Reduce (requires round-trip to staked form) + // keeps track of how many edges were trimmed out. + let mut edges_trimmed = 0; + + // Reduce (requires round-trip to staked form) and ensures the max backer per winner bound + // requirements are met. let sorted_assignments = { // convert to staked and reduce. 
let mut staked = assignment_ratio_to_staked_normalized(assignments, &stake_of)?; @@ -517,6 +536,53 @@ impl Miner { }, ); + // ensures that the max backers per winner bounds are respected given the supports + // generated from the assignments. We achieve that by removing edges (voter -> + // target) in the assignments with lower stake until the total number of backers per + // winner fits within the expected bounded supports. This should be performed *after* + // applying reduce over the assignments to avoid over-trimming. + // + // a potential trimming does not affect the desired targets of the solution as the + // targets have *too many* edges by definition if trimmed. + let max_backers_per_winner = T::MaxBackersPerWinner::get().saturated_into::(); + + let _ = sp_npos_elections::to_supports(&staked) + .iter_mut() + .filter(|(_, support)| support.voters.len() > max_backers_per_winner) + .for_each(|(target, ref mut support)| { + // first sort by support stake, lowest at the tail. + support.voters.sort_by(|a, b| b.1.cmp(&a.1)); + + // filter out lowest stake edge in this support. + // optimization note: collects edge voters to remove from assignments into a + // btree set to optimize the search in the next loop. + let filtered: alloc::collections::BTreeSet<_> = support + .voters + .split_off(max_backers_per_winner) + .into_iter() + .map(|(who, _stake)| who) + .collect(); + + // remove lowest stake edges calculated above from assignments. + staked.iter_mut().for_each(|assignment| { + if filtered.contains(&assignment.who) { + assignment.distribution.retain(|(t, _)| t != target); + } + }); + + edges_trimmed += filtered.len(); + }); + + debug_assert!({ + // at this point we expect the supports generated from the assignments to fit within + // the expected bounded supports. 
+ let expected_ok: Result< + crate::BoundedSupports<_, T::MaxWinners, T::MaxBackersPerWinner>, + _, + > = sp_npos_elections::to_supports(&staked).try_into(); + expected_ok.is_ok() + }); + // convert back. assignment_staked_to_ratio_normalized(staked)? }; @@ -549,7 +615,8 @@ impl Miner { // re-calc score. let score = solution.clone().score(stake_of, voter_at, target_at)?; - let is_trimmed = TrimmingStatus { weight: weight_trimmed, length: length_trimmed }; + let is_trimmed = + TrimmingStatus { weight: weight_trimmed, length: length_trimmed, edges: edges_trimmed }; Ok((solution, score, size, is_trimmed)) } @@ -618,7 +685,7 @@ impl Miner { let remove = assignments.len().saturating_sub(maximum_allowed_voters); log_no_system!( - debug, + trace, "from {} assignments, truncating to {} for length, removing {}", assignments.len(), maximum_allowed_voters, @@ -747,7 +814,7 @@ impl Miner { snapshot: RoundSnapshot>, current_round: u32, minimum_untrusted_score: Option, - ) -> Result, FeasibilityError> { + ) -> Result, FeasibilityError> { let RawSolution { solution, score, round } = raw_solution; let RoundSnapshot { voters: snapshot_voters, targets: snapshot_targets } = snapshot; @@ -814,9 +881,12 @@ impl Miner { // Finally, check that the claimed score was indeed correct. let known_score = supports.evaluate(); + ensure!(known_score == score, FeasibilityError::InvalidScore); - // Size of winners in miner solution is equal to `desired_targets` <= `MaxWinners`. + // Size of winners in miner solution is equal to `desired_targets` <= `MaxWinners`. In + // addition, the miner should have ensured that the MaxBackersPerWinner bound is respected, + // thus this conversion should not fail.
let supports = supports .try_into() .defensive_map_err(|_| FeasibilityError::BoundedConversionFailed)?; @@ -1862,6 +1932,193 @@ mod tests { }) } + #[test] + fn mine_solution_always_respects_max_backers_per_winner() { + use crate::mock::MaxBackersPerWinner; + use frame_election_provider_support::BoundedSupport; + + let targets = vec![10, 20, 30, 40]; + let voters = vec![ + (1, 11, bounded_vec![10, 20, 30]), + (2, 12, bounded_vec![10, 20, 30]), + (3, 13, bounded_vec![10, 20, 30]), + (4, 14, bounded_vec![10, 20, 30]), + (5, 15, bounded_vec![10, 20, 40]), + ]; + let snapshot = RoundSnapshot { voters: voters.clone(), targets: targets.clone() }; + let (round, desired_targets) = (1, 3); + + // election with unbounded max backers per winner. + ExtBuilder::default().max_backers_per_winner(u32::MAX).build_and_execute(|| { + assert_eq!(MaxBackersPerWinner::get(), u32::MAX); + + let (solution, expected_score_unbounded, _, trimming_status) = + Miner::::mine_solution_with_snapshot::<::Solver>( + voters.clone(), + targets.clone(), + desired_targets, + ) + .unwrap(); + + let ready_solution = Miner::::feasibility_check( + RawSolution { solution, score: expected_score_unbounded, round }, + Default::default(), + desired_targets, + snapshot.clone(), + round, + Default::default(), + ) + .unwrap(); + + assert_eq!( + ready_solution.supports.into_iter().collect::>(), + vec![ + ( + 10, + BoundedSupport { total: 25, voters: bounded_vec![(1, 11), (5, 5), (4, 9)] } + ), + (20, BoundedSupport { total: 22, voters: bounded_vec![(2, 12), (5, 10)] }), + (30, BoundedSupport { total: 18, voters: bounded_vec![(3, 13), (4, 5)] }) + ] + ); + + // no trimmed edges. + assert_eq!(trimming_status.trimmed_edges(), 0); + }); + + // election with max 1 backer per winner.
+ ExtBuilder::default().max_backers_per_winner(1).build_and_execute(|| { + assert_eq!(MaxBackersPerWinner::get(), 1); + + let (solution, expected_score_bounded, _, trimming_status) = + Miner::::mine_solution_with_snapshot::<::Solver>( + voters, + targets, + desired_targets, + ) + .unwrap(); + + let ready_solution = Miner::::feasibility_check( + RawSolution { solution, score: expected_score_bounded, round }, + Default::default(), + desired_targets, + snapshot, + round, + Default::default(), + ) + .unwrap(); + + for (_, supports) in ready_solution.supports.iter() { + assert!((supports.voters.len() as u32) <= MaxBackersPerWinner::get()); + } + + assert_eq!( + ready_solution.supports.into_iter().collect::>(), + vec![ + (10, BoundedSupport { total: 11, voters: bounded_vec![(1, 11)] }), + (20, BoundedSupport { total: 12, voters: bounded_vec![(2, 12)] }), + (30, BoundedSupport { total: 13, voters: bounded_vec![(3, 13)] }) + ] + ); + + // four trimmed edges. + assert_eq!(trimming_status.trimmed_edges(), 4); + }); + } + + #[test] + fn max_backers_edges_trims_lowest_stake() { + use crate::mock::MaxBackersPerWinner; + + ExtBuilder::default().build_and_execute(|| { + let targets = vec![10, 20, 30, 40]; + + let voters = vec![ + (1, 100, bounded_vec![10, 20]), + (2, 200, bounded_vec![10, 20, 30]), + (3, 300, bounded_vec![10, 30]), + (4, 400, bounded_vec![10, 30]), + (5, 500, bounded_vec![10, 20, 30]), + (6, 600, bounded_vec![10, 20, 30, 40]), + ]; + let snapshot = RoundSnapshot { voters: voters.clone(), targets: targets.clone() }; + let (round, desired_targets) = (1, 4); + + let max_backers_bound = u32::MAX; + let trim_backers_bound = 2; + + // election with unbounded max backers per winner.
+ MaxBackersPerWinner::set(max_backers_bound); + let (solution, score, _, trimming_status) = + Miner::::mine_solution_with_snapshot::<::Solver>( + voters.clone(), + targets.clone(), + desired_targets, + ) + .unwrap(); + + assert_eq!(trimming_status.trimmed_edges(), 0); + + let ready_solution = Miner::::feasibility_check( + RawSolution { solution, score, round }, + Default::default(), + desired_targets, + snapshot.clone(), + round, + Default::default(), + ) + .unwrap(); + + let full_supports = ready_solution.supports.into_iter().collect::>(); + + // gather the expected trimmed supports (lowest stake from supports with more backers + // than expected when MaxBackersPerWinner is 2) from the full, unbounded supports. + let expected_trimmed_supports = full_supports + .into_iter() + .filter(|(_, s)| s.voters.len() as u32 > trim_backers_bound) + .map(|(t, s)| (t, s.voters.into_iter().min_by(|a, b| a.1.cmp(&b.1)).unwrap())) + .collect::>(); + + // election with bounded 2 max backers per winner. + MaxBackersPerWinner::set(trim_backers_bound); + let (solution, score, _, trimming_status) = + Miner::::mine_solution_with_snapshot::<::Solver>( + voters.clone(), + targets.clone(), + desired_targets, + ) + .unwrap(); + + assert_eq!(trimming_status.trimmed_edges(), 2); + + let ready_solution = Miner::::feasibility_check( + RawSolution { solution, score, round }, + Default::default(), + desired_targets, + snapshot.clone(), + round, + Default::default(), + ) + .unwrap(); + + let trimmed_supports = ready_solution.supports.into_iter().collect::>(); + + // gather all trimmed_supports edges from the trimmed solution. + let mut trimmed_supports_edges_full = vec![]; + for (t, s) in trimmed_supports { + for v in s.voters { + trimmed_supports_edges_full.push((t, v)); + } + } + + // expected trimmed supports set should be disjoint to the trimmed_supports full set of + // edges.
+ for edge in trimmed_supports_edges_full { + assert!(!expected_trimmed_supports.contains(&edge)); + } + }) + } + #[test] fn trim_assignments_length_does_not_modify_when_short_enough() { ExtBuilder::default().build_and_execute(|| { diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs index 4b02fd6ca0337..88224f68edd45 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs @@ -24,7 +24,7 @@ use frame_support::{ PalletId, }; use frame_system::EnsureRoot; -use sp_core::{ConstU32, Get}; +use sp_core::{ConstBool, ConstU32, Get}; use sp_npos_elections::{ElectionScore, VoteWeight}; use sp_runtime::{ offchain::{ @@ -140,15 +140,16 @@ impl pallet_session::Config for Runtime { type SessionHandler = (OtherSessionHandler,); type RuntimeEvent = RuntimeEvent; type ValidatorId = AccountId; - type ValidatorIdOf = pallet_staking::StashOf; + type ValidatorIdOf = sp_runtime::traits::ConvertInto; type DisablingStrategy = pallet_session::disabling::UpToLimitWithReEnablingDisablingStrategy< SLASHING_DISABLING_FACTOR, >; type WeightInfo = (); } impl pallet_session::historical::Config for Runtime { - type FullIdentification = pallet_staking::Existence; - type FullIdentificationOf = pallet_staking::ExistenceOf; + type RuntimeEvent = RuntimeEvent; + type FullIdentification = (); + type FullIdentificationOf = pallet_staking::UnitIdentificationOf; } frame_election_provider_support::generate_solution_type!( @@ -174,6 +175,8 @@ parameter_types! 
{ pub static TransactionPriority: transaction_validity::TransactionPriority = 1; #[derive(Debug)] pub static MaxWinners: u32 = 100; + #[derive(Debug)] + pub static MaxBackersPerWinner: u32 = 100; pub static MaxVotesPerVoter: u32 = 16; pub static SignedFixedDeposit: Balance = 1; pub static SignedDepositIncreaseFactor: Percent = Percent::from_percent(10); @@ -184,7 +187,7 @@ parameter_types! { impl pallet_election_provider_multi_phase::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Currency = Balances; - type EstimateCallFee = frame_support::traits::ConstU32<8>; + type EstimateCallFee = frame_support::traits::ConstU64<8>; type SignedPhase = SignedPhase; type UnsignedPhase = UnsignedPhase; type BetterSignedThreshold = (); @@ -202,12 +205,18 @@ impl pallet_election_provider_multi_phase::Config for Runtime { type SlashHandler = (); type RewardHandler = (); type DataProvider = Staking; - type Fallback = - frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking, MaxWinners)>; + type Fallback = frame_election_provider_support::NoElection<( + AccountId, + BlockNumber, + Staking, + MaxWinners, + MaxBackersPerWinner, + )>; type GovernanceFallback = onchain::OnChainExecution; type Solver = SequentialPhragmen, ()>; type ForceOrigin = EnsureRoot; type MaxWinners = MaxWinners; + type MaxBackersPerWinner = MaxBackersPerWinner; type ElectionBounds = ElectionBounds; type BenchmarkingConfig = NoopElectionProviderBenchmarkConfig; type WeightInfo = (); @@ -221,6 +230,7 @@ impl MinerConfig for Runtime { type MaxLength = MinerMaxLength; type MaxWeight = MinerMaxWeight; type MaxWinners = MaxWinners; + type MaxBackersPerWinner = MaxBackersPerWinner; fn solution_weight(_v: u32, _t: u32, _a: u32, _d: u32) -> Weight { Weight::zero() @@ -357,6 +367,9 @@ parameter_types! 
{ } impl onchain::Config for OnChainSeqPhragmen { + type MaxWinnersPerPage = MaxWinners; + type MaxBackersPerWinner = MaxBackersPerWinner; + type Sort = ConstBool; type System = Runtime; type Solver = SequentialPhragmen< AccountId, @@ -364,7 +377,6 @@ impl onchain::Config for OnChainSeqPhragmen { >; type DataProvider = Staking; type WeightInfo = (); - type MaxWinners = MaxWinners; type Bounds = ElectionBounds; } diff --git a/substrate/frame/election-provider-support/Cargo.toml b/substrate/frame/election-provider-support/Cargo.toml index 32fa381e1d274..aadf87edb0e6f 100644 --- a/substrate/frame/election-provider-support/Cargo.toml +++ b/substrate/frame/election-provider-support/Cargo.toml @@ -24,6 +24,8 @@ sp-arithmetic = { workspace = true } sp-core = { workspace = true } sp-npos-elections = { workspace = true } sp-runtime = { workspace = true } +sp-std = { workspace = true } + [dev-dependencies] rand = { features = ["small_rng"], workspace = true, default-features = true } @@ -43,6 +45,7 @@ std = [ "sp-io/std", "sp-npos-elections/std", "sp-runtime/std", + "sp-std/std", ] runtime-benchmarks = [ "frame-support/runtime-benchmarks", diff --git a/substrate/frame/election-provider-support/src/lib.rs b/substrate/frame/election-provider-support/src/lib.rs index ba081aa533ffd..eb87fe8d3c129 100644 --- a/substrate/frame/election-provider-support/src/lib.rs +++ b/substrate/frame/election-provider-support/src/lib.rs @@ -21,10 +21,9 @@ //! within FRAME pallets. //! //! Something that will provide the functionality of election will implement -//! [`ElectionProvider`] and its parent-trait [`ElectionProviderBase`], whilst needing an -//! associated [`ElectionProviderBase::DataProvider`], which needs to be -//! fulfilled by an entity implementing [`ElectionDataProvider`]. Most often, *the data provider is* -//! the receiver of the election, resulting in a diagram as below: +//! [`ElectionProvider`], whilst needing an associated [`ElectionProvider::DataProvider`], which +//! 
needs to be fulfilled by an entity implementing [`ElectionDataProvider`]. Most often, *the data +//! provider is* the receiver of the election, resulting in a diagram as below: //! //! ```ignore //! ElectionDataProvider @@ -56,8 +55,15 @@ //! //! To accommodate both type of elections in one trait, the traits lean toward **stateful //! election**, as it is more general than the stateless. This is why [`ElectionProvider::elect`] -//! has no parameters. All value and type parameter must be provided by the [`ElectionDataProvider`] -//! trait, even if the election happens immediately. +//! does not receive election data as an input. All value and type parameter must be provided by the +//! [`ElectionDataProvider`] trait, even if the election happens immediately. +//! +//! ## Multi-page election support +//! +//! Both [`ElectionDataProvider`] and [`ElectionProvider`] traits are parameterized by page, +//! supporting an election to be performed over multiple pages. This enables the +//! [`ElectionDataProvider`] implementor to provide all the election data over multiple pages. +//! Similarly [`ElectionProvider::elect`] is parameterized by page index. //! //! ## Election Data //! @@ -104,17 +110,17 @@ //! impl ElectionDataProvider for Pallet { //! type AccountId = AccountId; //! type BlockNumber = BlockNumber; -//! type MaxVotesPerVoter = ConstU32<1>; +//! type MaxVotesPerVoter = ConstU32<100>; //! //! fn desired_targets() -> data_provider::Result { //! Ok(1) //! } -//! fn electing_voters(bounds: DataProviderBounds) +//! fn electing_voters(bounds: DataProviderBounds, _page: PageIndex) //! -> data_provider::Result>> //! { //! Ok(Default::default()) //! } -//! fn electable_targets(bounds: DataProviderBounds) -> data_provider::Result> { +//! fn electable_targets(bounds: DataProviderBounds, _page: PageIndex) -> data_provider::Result> { //! Ok(vec![10, 20, 30]) //! } //! fn next_election_prediction(now: BlockNumber) -> BlockNumber { @@ -126,40 +132,58 @@ //! //! 
mod generic_election_provider { //! use super::*; +//! use sp_runtime::traits::Zero; //! //! pub struct GenericElectionProvider(std::marker::PhantomData); //! //! pub trait Config { //! type DataProvider: ElectionDataProvider; +//! type MaxWinnersPerPage: Get; +//! type MaxBackersPerWinner: Get; +//! type Pages: Get; //! } //! -//! impl ElectionProviderBase for GenericElectionProvider { +//! impl ElectionProvider for GenericElectionProvider { //! type AccountId = AccountId; //! type BlockNumber = BlockNumber; //! type Error = &'static str; +//! type MaxBackersPerWinner = T::MaxBackersPerWinner; +//! type MaxWinnersPerPage = T::MaxWinnersPerPage; +//! type Pages = T::Pages; //! type DataProvider = T::DataProvider; -//! type MaxWinners = ConstU32<{ u32::MAX }>; //! -//! } +//! fn duration() -> ::BlockNumber { todo!() } //! -//! impl ElectionProvider for GenericElectionProvider { -//! fn ongoing() -> bool { false } -//! fn elect() -> Result, Self::Error> { -//! Self::DataProvider::electable_targets(DataProviderBounds::default()) -//! .map_err(|_| "failed to elect") -//! .map(|t| bounded_vec![(t[0], Support::default())]) +//! fn start() -> Result<(), ::Error> { todo!() } +//! +//! fn elect(page: PageIndex) -> Result, Self::Error> { +//! unimplemented!() +//! } +//! +//! fn status() -> Result { +//! unimplemented!() //! } //! } //! } //! //! mod runtime { +//! use frame_support::parameter_types; //! use super::generic_election_provider; //! use super::data_provider_mod; //! use super::AccountId; //! +//! parameter_types! { +//! pub static MaxWinnersPerPage: u32 = 10; +//! pub static MaxBackersPerWinner: u32 = 20; +//! pub static Pages: u32 = 2; +//! } +//! //! struct Runtime; //! impl generic_election_provider::Config for Runtime { //! type DataProvider = data_provider_mod::Pallet; +//! type MaxWinnersPerPage = MaxWinnersPerPage; +//! type MaxBackersPerWinner = MaxBackersPerWinner; +//! type Pages = Pages; //! } //! //! 
impl data_provider_mod::Config for Runtime { @@ -181,6 +205,8 @@ extern crate alloc; use alloc::{boxed::Box, vec::Vec}; use core::fmt::Debug; +use frame_support::traits::{Defensive, DefensiveResult}; +use sp_core::ConstU32; use sp_runtime::{ traits::{Bounded, Saturating, Zero}, RuntimeDebug, @@ -190,12 +216,13 @@ pub use bounds::DataProviderBounds; pub use codec::{Decode, DecodeWithMemTracking, Encode, MaxEncodedLen}; /// Re-export the solution generation macro. pub use frame_election_provider_solution_type::generate_solution_type; -pub use frame_support::{traits::Get, weights::Weight, BoundedVec}; +pub use frame_support::{traits::Get, weights::Weight, BoundedVec, DefaultNoBound}; +use scale_info::TypeInfo; /// Re-export some type as they are used in the interface. pub use sp_arithmetic::PerThing; pub use sp_npos_elections::{ - Assignment, BalancingConfig, BoundedSupports, ElectionResult, Error, ExtendedBalance, - IdentifierT, PerThing128, Support, Supports, VoteWeight, + Assignment, BalancingConfig, ElectionResult, Error, ExtendedBalance, IdentifierT, PerThing128, + Support, Supports, VoteWeight, }; pub use traits::NposSolution; @@ -234,6 +261,9 @@ mod mock; #[cfg(test)] mod tests; +/// A page index for the multi-block elections pagination. +pub type PageIndex = u32; + /// The [`IndexAssignment`] type is an intermediate between the assignments list /// ([`&[Assignment]`][Assignment]) and `SolutionOf`. /// @@ -251,7 +281,9 @@ pub struct IndexAssignment { pub distribution: Vec<(TargetIndex, P)>, } -impl IndexAssignment { +impl + IndexAssignment +{ pub fn new( assignment: &Assignment, voter_index: impl Fn(&AccountId) -> Option, @@ -293,21 +325,45 @@ pub trait ElectionDataProvider { /// Maximum number of votes per voter that this data provider is providing. type MaxVotesPerVoter: Get; - /// All possible targets for the election, i.e. the targets that could become elected, thus - /// "electable". 
+ /// Returns the possible targets for the election associated with the provided `page`, i.e. the + /// targets that could become elected, thus "electable". /// /// This should be implemented as a self-weighing function. The implementor should register its /// appropriate weight at the end of execution with the system pallet directly. - fn electable_targets(bounds: DataProviderBounds) - -> data_provider::Result>; + fn electable_targets( + bounds: DataProviderBounds, + page: PageIndex, + ) -> data_provider::Result>; - /// All the voters that participate in the election, thus "electing". + /// A state-less version of [`Self::electable_targets`]. + /// + /// An election-provider that only uses 1 page should use this. + fn electable_targets_stateless( + bounds: DataProviderBounds, + ) -> data_provider::Result> { + Self::electable_targets(bounds, 0) + } + + /// All the voters that participate in the election associated with page `page`, thus + /// "electing". /// /// Note that if a notion of self-vote exists, it should be represented here. /// /// This should be implemented as a self-weighing function. The implementor should register its /// appropriate weight at the end of execution with the system pallet directly. - fn electing_voters(bounds: DataProviderBounds) -> data_provider::Result>>; + fn electing_voters( + bounds: DataProviderBounds, + page: PageIndex, + ) -> data_provider::Result>>; + + /// A state-less version of [`Self::electing_voters`]. + /// + /// An election-provider that only uses 1 page should use this. + fn electing_voters_stateless( + bounds: DataProviderBounds, + ) -> data_provider::Result>> { + Self::electing_voters(bounds, 0) + } /// The number of targets to elect. /// @@ -339,6 +395,12 @@ pub trait ElectionDataProvider { ) { } + /// Instruct the data provider to fetch a page of the solution. + /// + /// This can be useful to measure the export process in benchmarking. 
+ #[cfg(any(feature = "runtime-benchmarks", test))] + fn fetch_page(_page: PageIndex) {} + /// Utility function only to be used in benchmarking scenarios, to be implemented optionally, /// else a noop. /// @@ -361,28 +423,39 @@ pub trait ElectionDataProvider { /// Clear all voters and targets. #[cfg(any(feature = "runtime-benchmarks", test))] fn clear() {} + + /// Force set the desired targets in the snapshot. + #[cfg(any(feature = "runtime-benchmarks", test))] + fn set_desired_targets(_count: u32) {} } -/// Base trait for types that can provide election -pub trait ElectionProviderBase { - /// The account identifier type. +/// Something that can compute the result of an election and pass it back to the caller in a paged +/// way. +pub trait ElectionProvider { + /// The account ID identifier; type AccountId; /// The block number type. type BlockNumber; - /// The error type that is returned by the provider. - type Error: Debug; + /// The error type returned by the provider; + type Error: Debug + PartialEq; - /// The upper bound on election winners that can be returned. + /// The maximum number of winners per page in results returned by this election provider. /// - /// # WARNING + /// A winner is an `AccountId` that is part of the final election result. + type MaxWinnersPerPage: Get; + + /// The maximum number of backers that a single page may have in results returned by this + /// election provider. /// - /// when communicating with the data provider, one must ensure that - /// `DataProvider::desired_targets` returns a value less than this bound. An - /// implementation can chose to either return an error and/or sort and - /// truncate the output to meet this bound. - type MaxWinners: Get; + /// A backer is an `AccountId` that "backs" one or more winners. For example, in the context of + /// nominated proof of stake, a backer is a voter that nominates a winner validator in the + /// election result. 
+ type MaxBackersPerWinner: Get; + + /// The number of pages that this election provider supports. + type Pages: Get; /// The data provider of the election. type DataProvider: ElectionDataProvider< @@ -390,92 +463,141 @@ pub trait ElectionProviderBase { BlockNumber = Self::BlockNumber, >; + /// Elect a new set of winners. + /// + /// A complete election may require multiple calls to [`ElectionProvider::elect`] if + /// [`ElectionProvider::Pages`] is higher than one. + /// + /// The result is returned in a target major format, namely as vector of supports. + fn elect(page: PageIndex) -> Result, Self::Error>; + + /// The index of the *most* significant page that this election provider supports. + fn msp() -> PageIndex { + Self::Pages::get().saturating_sub(1) + } + + /// The index of the *least* significant page that this election provider supports. + fn lsp() -> PageIndex { + Zero::zero() + } + /// checked call to `Self::DataProvider::desired_targets()` ensuring the value never exceeds - /// [`Self::MaxWinners`]. + /// [`Self::MaxWinnersPerPage`]. fn desired_targets_checked() -> data_provider::Result { Self::DataProvider::desired_targets().and_then(|desired_targets| { - if desired_targets <= Self::MaxWinners::get() { + if desired_targets <= Self::MaxWinnersPerPage::get() { Ok(desired_targets) } else { Err("desired_targets must not be greater than MaxWinners.") } }) } -} -/// Elect a new set of winners, bounded by `MaxWinners`. -/// -/// It must always use [`ElectionProviderBase::DataProvider`] to fetch the data it needs. -/// -/// This election provider that could function asynchronously. This implies that this election might -/// needs data ahead of time (ergo, receives no arguments to `elect`), and might be `ongoing` at -/// times. -pub trait ElectionProvider: ElectionProviderBase { - /// Indicate if this election provider is currently ongoing an asynchronous election or not. - fn ongoing() -> bool; + /// Return the duration of your election. 
+ /// + /// This excludes the duration of the export. For that, use [`Self::duration_with_export`]. + fn duration() -> Self::BlockNumber; + + /// Return the duration of your election, including the export. + fn duration_with_export() -> Self::BlockNumber + where + Self::BlockNumber: From + core::ops::Add, + { + let export: Self::BlockNumber = Self::Pages::get().into(); + Self::duration() + export + } + + /// Signal that the election should start + fn start() -> Result<(), Self::Error>; - /// Performs the election. This should be implemented as a self-weighing function. The - /// implementor should register its appropriate weight at the end of execution with the - /// system pallet directly. - fn elect() -> Result, Self::Error>; + /// Indicate whether this election provider is currently ongoing an asynchronous election. + /// + /// `Err(())` should signal that we are not doing anything, and `elect` should def. not be + /// called. `Ok(false)` means we are doing something, but work is still ongoing. `elect` should + /// not be called. `Ok(true)` means we are done and ready for a call to `elect`. + fn status() -> Result; + + /// Signal the election provider that we are about to call `elect` asap, and it should prepare + /// itself. + #[cfg(feature = "runtime-benchmarks")] + fn asap() {} } /// A (almost) marker trait that signifies an election provider as working synchronously. i.e. being /// *instant*. /// -/// This must still use the same data provider as with [`ElectionProviderBase::DataProvider`]. +/// This must still use the same data provider as with [`ElectionProvider::DataProvider`]. /// However, it can optionally overwrite the amount of voters and targets that are fetched from the /// data provider at runtime via `forced_input_voters_bound` and `forced_input_target_bound`. 
-pub trait InstantElectionProvider: ElectionProviderBase { +pub trait InstantElectionProvider: ElectionProvider { fn instant_elect( - forced_input_voters_bound: DataProviderBounds, - forced_input_target_bound: DataProviderBounds, + voters: Vec>, + targets: Vec, + desired_targets: u32, ) -> Result, Self::Error>; + + // Sine many instant election provider, like [`NoElection`] are meant to do nothing, this is a + // hint for the caller to call before, and if `false` is returned, not bother with passing all + // the info to `instant_elect`. + fn bother() -> bool; } /// An election provider that does nothing whatsoever. pub struct NoElection(core::marker::PhantomData); -impl ElectionProviderBase - for NoElection<(AccountId, BlockNumber, DataProvider, MaxWinners)> +impl ElectionProvider + for NoElection<(AccountId, BlockNumber, DataProvider, MaxWinnersPerPage, MaxBackersPerWinner)> where DataProvider: ElectionDataProvider, - MaxWinners: Get, + MaxWinnersPerPage: Get, + MaxBackersPerWinner: Get, + BlockNumber: Zero, { type AccountId = AccountId; type BlockNumber = BlockNumber; type Error = &'static str; - type MaxWinners = MaxWinners; + type Pages = ConstU32<1>; type DataProvider = DataProvider; -} + type MaxWinnersPerPage = MaxWinnersPerPage; + type MaxBackersPerWinner = MaxBackersPerWinner; -impl ElectionProvider - for NoElection<(AccountId, BlockNumber, DataProvider, MaxWinners)> -where - DataProvider: ElectionDataProvider, - MaxWinners: Get, -{ - fn ongoing() -> bool { - false + fn elect(_page: PageIndex) -> Result, Self::Error> { + Err("`NoElection` cannot do anything.") } - fn elect() -> Result, Self::Error> { + fn start() -> Result<(), Self::Error> { Err("`NoElection` cannot do anything.") } + + fn duration() -> Self::BlockNumber { + Zero::zero() + } + + fn status() -> Result { + Err(()) + } } -impl InstantElectionProvider - for NoElection<(AccountId, BlockNumber, DataProvider, MaxWinners)> +impl + InstantElectionProvider + for NoElection<(AccountId, BlockNumber, 
DataProvider, MaxWinnersPerPage, MaxBackersPerWinner)> where DataProvider: ElectionDataProvider, - MaxWinners: Get, + MaxWinnersPerPage: Get, + MaxBackersPerWinner: Get, + BlockNumber: Zero, { fn instant_elect( - _: DataProviderBounds, - _: DataProviderBounds, + _: Vec>, + _: Vec, + _: u32, ) -> Result, Self::Error> { Err("`NoElection` cannot do anything.") } + + fn bother() -> bool { + false + } } /// A utility trait for something to implement `ElectionDataProvider` in a sensible way. @@ -492,11 +614,36 @@ pub trait SortedListProvider { type Error: core::fmt::Debug; /// The type used by the list to compare nodes for ordering. - type Score: Bounded + Saturating + Zero; + type Score: Bounded + Saturating + Zero + Default; + + /// A typical range for this list. + /// + /// By default, this would be implemented as `Bounded` impl of `Self::Score`. + /// + /// If this is implemented by a bags-list instance, it will be the smallest and largest bags. + /// + /// This is useful to help another pallet that consumes this trait generate an even distribution + /// of nodes for testing/genesis. + fn range() -> (Self::Score, Self::Score) { + (Self::Score::min_value(), Self::Score::max_value()) + } /// An iterator over the list, which can have `take` called on it. fn iter() -> Box>; + /// Lock the list. + /// + /// This will prevent subsequent calls to + /// - [`Self::on_insert`] + /// - [`Self::on_update`] + /// - [`Self::on_decrease`] + /// - [`Self::on_increase`] + /// - [`Self::on_remove`] + fn lock(); + + /// Unlock the list. This will nullify the effects of [`Self::lock`]. + fn unlock(); + /// Returns an iterator over the list, starting right after from the given voter. /// /// May return an error if `start` is invalid. @@ -558,7 +705,7 @@ pub trait SortedListProvider { /// new list, which can lead to too many storage accesses, exhausting the block weight. 
fn unsafe_regenerate( all: impl IntoIterator, - score_of: Box Self::Score>, + score_of: Box Option>, ) -> u32; /// Remove all items from the list. @@ -585,8 +732,10 @@ pub trait SortedListProvider { pub trait ScoreProvider { type Score; - /// Get the current `Score` of `who`. - fn score(who: &AccountId) -> Self::Score; + /// Get the current `Score` of `who`, `None` if `who` is not present. + /// + /// `None` can be interpreted as a signal that the voter should be removed from the list. + fn score(who: &AccountId) -> Option; /// For tests, benchmarks and fuzzing, set the `score`. #[cfg(any(feature = "runtime-benchmarks", feature = "fuzz", feature = "std"))] @@ -607,7 +756,11 @@ pub trait NposSolver { fn solve( to_elect: usize, targets: Vec, - voters: Vec<(Self::AccountId, VoteWeight, impl IntoIterator)>, + voters: Vec<( + Self::AccountId, + VoteWeight, + impl Clone + IntoIterator, + )>, ) -> Result, Self::Error>; /// Measure the weight used in the calculation of the solver. @@ -617,6 +770,70 @@ pub trait NposSolver { fn weight(voters: u32, targets: u32, vote_degree: u32) -> Weight; } +/// A quick and dirty solver, that produces a valid but probably worthless election result, but is +/// fast. +/// +/// It choses a random number of winners without any consideration. +/// +/// Then it iterates over the voters and assigns them to the winners. +/// +/// It is only meant to be used in benchmarking. 
+pub struct QuickDirtySolver(core::marker::PhantomData<(AccountId, Accuracy)>); +impl NposSolver + for QuickDirtySolver +{ + type AccountId = AccountId; + type Accuracy = Accuracy; + type Error = &'static str; + + fn solve( + to_elect: usize, + targets: Vec, + voters: Vec<( + Self::AccountId, + VoteWeight, + impl Clone + IntoIterator, + )>, + ) -> Result, Self::Error> { + use sp_std::collections::btree_map::BTreeMap; + + if to_elect > targets.len() { + return Err("to_elect is greater than the number of targets."); + } + + let winners = targets.into_iter().take(to_elect).collect::>(); + + let mut assignments = Vec::with_capacity(voters.len()); + let mut final_winners = BTreeMap::::new(); + + for (voter, weight, votes) in voters { + let our_winners = winners + .iter() + .filter(|w| votes.clone().into_iter().any(|v| v == **w)) + .collect::>(); + let our_winners_len = our_winners.len(); + let distribution = our_winners + .into_iter() + .map(|w| { + *final_winners.entry(w.clone()).or_default() += weight as u128; + (w.clone(), Self::Accuracy::from_rational(1, our_winners_len as u128)) + }) + .collect::>(); + + let mut assignment = Assignment { who: voter, distribution }; + assignment.try_normalize().unwrap(); + assignments.push(assignment); + } + + let winners = final_winners.into_iter().collect::>(); + Ok(ElectionResult { winners, assignments }) + } + + fn weight(_: u32, _: u32, _: u32) -> Weight { + Default::default() + } +} + /// A wrapper for [`sp_npos_elections::seq_phragmen`] that implements [`NposSolver`]. See the /// documentation of [`sp_npos_elections::seq_phragmen`] for more info. 
pub struct SequentialPhragmen( @@ -632,7 +849,11 @@ impl, - voters: Vec<(Self::AccountId, VoteWeight, impl IntoIterator)>, + voters: Vec<( + Self::AccountId, + VoteWeight, + impl Clone + IntoIterator, + )>, ) -> Result, Self::Error> { sp_npos_elections::seq_phragmen(winners, targets, voters, Balancing::get()) } @@ -657,7 +878,11 @@ impl, - voters: Vec<(Self::AccountId, VoteWeight, impl IntoIterator)>, + voters: Vec<( + Self::AccountId, + VoteWeight, + impl Clone + IntoIterator, + )>, ) -> Result, Self::Error> { sp_npos_elections::phragmms(winners, targets, voters, Balancing::get()) } @@ -674,10 +899,313 @@ pub type Voter = (AccountId, VoteWeight, BoundedVec = Voter<::AccountId, ::MaxVotesPerVoter>; -/// Same as `BoundedSupports` but parameterized by a `ElectionProviderBase`. +/// A bounded vector of supports. Bounded equivalent to [`sp_npos_elections::Supports`]. +#[derive( + Default, Debug, Encode, Decode, DecodeWithMemTracking, scale_info::TypeInfo, MaxEncodedLen, +)] +#[codec(mel_bound(AccountId: MaxEncodedLen, Bound: Get))] +#[scale_info(skip_type_params(Bound))] +pub struct BoundedSupport> { + /// Total support. + pub total: ExtendedBalance, + /// Support from voters. 
+ pub voters: BoundedVec<(AccountId, ExtendedBalance), Bound>, +} + +impl> sp_npos_elections::Backings for &BoundedSupport { + fn total(&self) -> ExtendedBalance { + self.total + } +} + +impl> PartialEq for BoundedSupport { + fn eq(&self, other: &Self) -> bool { + self.total == other.total && self.voters == other.voters + } +} + +impl> From> for Support { + fn from(b: BoundedSupport) -> Self { + Support { total: b.total, voters: b.voters.into_inner() } + } +} + +impl> Clone for BoundedSupport { + fn clone(&self) -> Self { + Self { voters: self.voters.clone(), total: self.total } + } +} + +impl> TryFrom> + for BoundedSupport +{ + type Error = &'static str; + fn try_from(s: sp_npos_elections::Support) -> Result { + let voters = s.voters.try_into().map_err(|_| "voters bound not respected")?; + Ok(Self { voters, total: s.total }) + } +} + +impl> BoundedSupport { + /// Try and construct a `BoundedSupport` from an unbounded version, and reside to sorting and + /// truncating if needed. + /// + /// Returns the number of backers removed. + pub fn sorted_truncate_from(mut support: sp_npos_elections::Support) -> (Self, u32) { + // If bounds meet, then short circuit. + if let Ok(bounded) = support.clone().try_into() { + return (bounded, 0) + } + + let pre_len = support.voters.len(); + // sort support based on stake of each backer, low to high. + // Note: we don't sort high to low and truncate because we would have to track `total` + // updates, so we need one iteration anyhow. + support.voters.sort_by(|a, b| a.1.cmp(&b.1)); + // then do the truncation. + let mut bounded = Self { voters: Default::default(), total: 0 }; + while let Some((voter, weight)) = support.voters.pop() { + if let Err(_) = bounded.voters.try_push((voter, weight)) { + break + } + bounded.total += weight; + } + let post_len = bounded.voters.len(); + (bounded, (pre_len - post_len) as u32) + } +} + +/// A bounded vector of [`BoundedSupport`]. 
+/// +/// A [`BoundedSupports`] is a set of [`sp_npos_elections::Supports`] which are bounded in two +/// dimensions. `BInner` corresponds to the bound of the maximum backers per voter and `BOuter` +/// corresponds to the bound of the maximum winners that the bounded supports may contain. +/// +/// With the bounds, we control the maximum size of a bounded supports instance. +#[derive(Encode, Decode, DecodeWithMemTracking, TypeInfo, DefaultNoBound, MaxEncodedLen)] +#[codec(mel_bound(AccountId: MaxEncodedLen, BOuter: Get, BInner: Get))] +#[scale_info(skip_type_params(BOuter, BInner))] +pub struct BoundedSupports, BInner: Get>( + pub BoundedVec<(AccountId, BoundedSupport), BOuter>, +); + +/// Try and build yourself from another `BoundedSupports` with a different set of types. +pub trait TryFromOtherBounds, BOtherInner: Get> { + fn try_from_other_bounds( + other: BoundedSupports, + ) -> Result + where + Self: Sized; +} + +impl< + AccountId, + BOuter: Get, + BInner: Get, + BOtherOuter: Get, + BOuterInner: Get, + > TryFromOtherBounds + for BoundedSupports +{ + fn try_from_other_bounds( + other: BoundedSupports, + ) -> Result { + // NOTE: we might as well do this with unsafe rust and do it faster. + if BOtherOuter::get() <= BOuter::get() && BOuterInner::get() <= BInner::get() { + // Both ouf our bounds are larger than the input's bound, can convert. + let supports = other + .into_iter() + .map(|(acc, b_support)| { + b_support + .try_into() + .defensive_map_err(|_| Error::BoundsExceeded) + .map(|b_support| (acc, b_support)) + }) + .collect::, _>>() + .defensive()?; + supports.try_into() + } else { + Err(crate::Error::BoundsExceeded) + } + } +} + +impl, BInner: Get> + BoundedSupports +{ + /// Try and construct a `BoundedSupports` from an unbounded version, and reside to sorting and + /// truncating if need ne. + /// + /// Two u32s returned are number of winners and backers removed respectively. 
+ pub fn sorted_truncate_from(supports: Supports) -> (Self, u32, u32) { + // if bounds, meet, short circuit + if let Ok(bounded) = supports.clone().try_into() { + return (bounded, 0, 0) + } + + let pre_winners = supports.len(); + let mut backers_removed = 0; + // first, convert all inner supports. + let mut inner_supports = supports + .into_iter() + .map(|(account, support)| { + let (bounded, removed) = + BoundedSupport::::sorted_truncate_from(support); + backers_removed += removed; + (account, bounded) + }) + .collect::>(); + + // then sort outer supports based on total stake, high to low + inner_supports.sort_by(|a, b| b.1.total.cmp(&a.1.total)); + + // then take the first slice that can fit. + let bounded = BoundedSupports(BoundedVec::< + (AccountId, BoundedSupport), + BOuter, + >::truncate_from(inner_supports)); + let post_winners = bounded.len(); + (bounded, (pre_winners - post_winners) as u32, backers_removed) + } +} + +/// Helper trait for conversion of a vector of unbounded supports into a vector of bounded ones. 
+pub trait TryFromUnboundedPagedSupports, BInner: Get> { + fn try_from_unbounded_paged( + self, + ) -> Result>, crate::Error> + where + Self: Sized; +} + +impl, BInner: Get> + TryFromUnboundedPagedSupports for Vec> +{ + fn try_from_unbounded_paged( + self, + ) -> Result>, crate::Error> { + self.into_iter() + .map(|s| s.try_into().map_err(|_| crate::Error::BoundsExceeded)) + .collect::, _>>() + } +} + +impl, BInner: Get> sp_npos_elections::EvaluateSupport + for BoundedSupports +{ + fn evaluate(&self) -> sp_npos_elections::ElectionScore { + sp_npos_elections::evaluate_support(self.iter().map(|(_, s)| s)) + } +} + +impl, BInner: Get> sp_std::ops::DerefMut + for BoundedSupports +{ + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl, BInner: Get> Debug + for BoundedSupports +{ + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + for s in self.0.iter() { + write!(f, "({:?}, {:?}, {:?}) ", s.0, s.1.total, s.1.voters)?; + } + Ok(()) + } +} + +impl, BInner: Get> PartialEq + for BoundedSupports +{ + fn eq(&self, other: &Self) -> bool { + self.0 == other.0 + } +} + +impl, BInner: Get> Into> + for BoundedSupports +{ + fn into(self) -> Supports { + // NOTE: can be done faster with unsafe code. 
+ self.0.into_iter().map(|(acc, b_support)| (acc, b_support.into())).collect() + } +} + +impl, BInner: Get> + From), BOuter>> + for BoundedSupports +{ + fn from(t: BoundedVec<(AccountId, BoundedSupport), BOuter>) -> Self { + Self(t) + } +} + +impl, BInner: Get> Clone + for BoundedSupports +{ + fn clone(&self) -> Self { + Self(self.0.clone()) + } +} + +impl, BInner: Get> sp_std::ops::Deref + for BoundedSupports +{ + type Target = BoundedVec<(AccountId, BoundedSupport), BOuter>; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl, BInner: Get> IntoIterator + for BoundedSupports +{ + type Item = (AccountId, BoundedSupport); + type IntoIter = sp_std::vec::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} + +impl, BInner: Get> TryFrom> + for BoundedSupports +{ + type Error = crate::Error; + + fn try_from(supports: Supports) -> Result { + // optimization note: pre-allocate outer bounded vec. + let mut outer_bounded_supports = BoundedVec::< + (AccountId, BoundedSupport), + BOuter, + >::with_bounded_capacity( + supports.len().min(BOuter::get() as usize) + ); + + // optimization note: avoid intermediate allocations. + supports + .into_iter() + .map(|(account, support)| (account, support.try_into().map_err(|_| ()))) + .try_for_each(|(account, maybe_bounded_supports)| { + outer_bounded_supports + .try_push((account, maybe_bounded_supports?)) + .map_err(|_| ()) + }) + .map_err(|_| crate::Error::BoundsExceeded)?; + + Ok(outer_bounded_supports.into()) + } +} + +/// Same as `BoundedSupports` but parameterized by an `ElectionProvider`. 
pub type BoundedSupportsOf = BoundedSupports< - ::AccountId, - ::MaxWinners, + ::AccountId, + ::MaxWinnersPerPage, + ::MaxBackersPerWinner, >; sp_core::generate_feature_enabled_macro!( diff --git a/substrate/frame/election-provider-support/src/onchain.rs b/substrate/frame/election-provider-support/src/onchain.rs index 1063d5d35aee7..20c0e972736fc 100644 --- a/substrate/frame/election-provider-support/src/onchain.rs +++ b/substrate/frame/election-provider-support/src/onchain.rs @@ -20,27 +20,27 @@ //! careful when using it onchain. use crate::{ - bounds::{DataProviderBounds, ElectionBounds, ElectionBoundsBuilder}, - BoundedSupportsOf, Debug, ElectionDataProvider, ElectionProvider, ElectionProviderBase, - InstantElectionProvider, NposSolver, WeightInfo, + bounds::{ElectionBounds, ElectionBoundsBuilder}, + BoundedSupportsOf, Debug, ElectionDataProvider, ElectionProvider, InstantElectionProvider, + NposSolver, PageIndex, VoterOf, WeightInfo, }; -use alloc::collections::btree_map::BTreeMap; +use alloc::{collections::btree_map::BTreeMap, vec::Vec}; use core::marker::PhantomData; use frame_support::{dispatch::DispatchClass, traits::Get}; +use frame_system::pallet_prelude::BlockNumberFor; use sp_npos_elections::{ - assignment_ratio_to_staked_normalized, to_supports, BoundedSupports, ElectionResult, VoteWeight, + assignment_ratio_to_staked_normalized, to_supports, ElectionResult, VoteWeight, }; /// Errors of the on-chain election. -#[derive(Eq, PartialEq, Debug)] +#[derive(Eq, PartialEq, Debug, Clone)] pub enum Error { /// An internal error in the NPoS elections crate. NposElections(sp_npos_elections::Error), /// Errors from the data provider. DataProvider(&'static str), - /// Configurational error caused by `desired_targets` requested by data provider exceeding - /// `MaxWinners`. - TooManyWinners, + /// Results failed to meet the bounds. 
+ FailedToBound, } impl From for Error { @@ -62,6 +62,12 @@ pub type BoundedExecution = OnChainExecution; /// Configuration trait for an onchain election execution. pub trait Config { + /// Whether to try and sort or not. + /// + /// If `true`, the supports will be sorted by descending total support to meet the bounds. If + /// `false`, `FailedToBound` error may be returned. + type Sort: Get; + /// Needed for weight registration. type System: frame_system::Config; @@ -71,6 +77,18 @@ pub trait Config { Error = sp_npos_elections::Error, >; + /// Maximum number of backers allowed per target. + /// + /// If the bounds are exceeded due to the data returned by the data provider, the election will + /// fail. + type MaxBackersPerWinner: Get; + + /// Maximum number of winners in an election. + /// + /// If the bounds are exceeded due to the data returned by the data provider, the election will + /// fail. + type MaxWinnersPerPage: Get; + /// Something that provides the data for election. type DataProvider: ElectionDataProvider< AccountId = ::AccountId, @@ -80,102 +98,115 @@ pub trait Config { /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; - /// Upper bound on maximum winners from electable targets. - /// - /// As noted in the documentation of [`ElectionProviderBase::MaxWinners`], this value should - /// always be more than `DataProvider::desired_target`. - type MaxWinners: Get; - /// Elections bounds, to use when calling into [`Config::DataProvider`]. It might be overwritten /// in the `InstantElectionProvider` impl. type Bounds: Get; } -/// Same as `BoundedSupportsOf` but for `onchain::Config`. 
-pub type OnChainBoundedSupportsOf = BoundedSupports< - <::System as frame_system::Config>::AccountId, - ::MaxWinners, ->; - -fn elect_with_input_bounds( - bounds: ElectionBounds, -) -> Result, Error> { - let (voters, targets) = T::DataProvider::electing_voters(bounds.voters) - .and_then(|voters| Ok((voters, T::DataProvider::electable_targets(bounds.targets)?))) - .map_err(Error::DataProvider)?; - - let desired_targets = T::DataProvider::desired_targets().map_err(Error::DataProvider)?; +impl OnChainExecution { + fn elect_with_snapshot( + voters: Vec>, + targets: Vec<::AccountId>, + desired_targets: u32, + ) -> Result, Error> { + if (desired_targets > T::MaxWinnersPerPage::get()) && !T::Sort::get() { + // early exit what will fail in the last line anyways. + return Err(Error::FailedToBound) + } - if desired_targets > T::MaxWinners::get() { - // early exit - return Err(Error::TooManyWinners) + let voters_len = voters.len() as u32; + let targets_len = targets.len() as u32; + + let stake_map: BTreeMap<_, _> = voters + .iter() + .map(|(validator, vote_weight, _)| (validator.clone(), *vote_weight)) + .collect(); + + let stake_of = |w: &::AccountId| -> VoteWeight { + stake_map.get(w).cloned().unwrap_or_default() + }; + + let ElectionResult { winners: _, assignments } = + T::Solver::solve(desired_targets as usize, targets, voters).map_err(Error::from)?; + + let staked = assignment_ratio_to_staked_normalized(assignments, &stake_of)?; + + let weight = T::Solver::weight::( + voters_len, + targets_len, + ::MaxVotesPerVoter::get(), + ); + frame_system::Pallet::::register_extra_weight_unchecked( + weight, + DispatchClass::Mandatory, + ); + + let unbounded = to_supports(&staked); + let bounded = if T::Sort::get() { + let (bounded, _winners_removed, _backers_removed) = + BoundedSupportsOf::::sorted_truncate_from(unbounded); + bounded + } else { + unbounded.try_into().map_err(|_| Error::FailedToBound)? 
+ }; + Ok(bounded) } - let voters_len = voters.len() as u32; - let targets_len = targets.len() as u32; - - let stake_map: BTreeMap<_, _> = voters - .iter() - .map(|(validator, vote_weight, _)| (validator.clone(), *vote_weight)) - .collect(); - - let stake_of = |w: &::AccountId| -> VoteWeight { - stake_map.get(w).cloned().unwrap_or_default() - }; - - let ElectionResult { winners: _, assignments } = - T::Solver::solve(desired_targets as usize, targets, voters).map_err(Error::from)?; - - let staked = assignment_ratio_to_staked_normalized(assignments, &stake_of)?; - - let weight = T::Solver::weight::( - voters_len, - targets_len, - ::MaxVotesPerVoter::get(), - ); - frame_system::Pallet::::register_extra_weight_unchecked( - weight, - DispatchClass::Mandatory, - ); + fn elect_with( + bounds: ElectionBounds, + page: PageIndex, + ) -> Result, Error> { + let (voters, targets) = T::DataProvider::electing_voters(bounds.voters, page) + .and_then(|voters| { + Ok((voters, T::DataProvider::electable_targets(bounds.targets, page)?)) + }) + .map_err(Error::DataProvider)?; + let desired_targets = T::DataProvider::desired_targets().map_err(Error::DataProvider)?; + Self::elect_with_snapshot(voters, targets, desired_targets) + } +} - // defensive: Since npos solver returns a result always bounded by `desired_targets`, this is - // never expected to happen as long as npos solver does what is expected for it to do. 
- let supports: OnChainBoundedSupportsOf = - to_supports(&staked).try_into().map_err(|_| Error::TooManyWinners)?; +impl InstantElectionProvider for OnChainExecution { + fn instant_elect( + voters: Vec>, + targets: Vec<::AccountId>, + desired_targets: u32, + ) -> Result, Self::Error> { + Self::elect_with_snapshot(voters, targets, desired_targets) + } - Ok(supports) + fn bother() -> bool { + true + } } -impl ElectionProviderBase for OnChainExecution { +impl ElectionProvider for OnChainExecution { type AccountId = ::AccountId; - type BlockNumber = frame_system::pallet_prelude::BlockNumberFor; + type BlockNumber = BlockNumberFor; type Error = Error; - type MaxWinners = T::MaxWinners; + type MaxWinnersPerPage = T::MaxWinnersPerPage; + type MaxBackersPerWinner = T::MaxBackersPerWinner; + // can support any number of pages, as this is meant to be called "instantly". We don't care + // about this value here. + type Pages = sp_core::ConstU32<1>; type DataProvider = T::DataProvider; -} -impl InstantElectionProvider for OnChainExecution { - fn instant_elect( - forced_input_voters_bounds: DataProviderBounds, - forced_input_targets_bounds: DataProviderBounds, - ) -> Result, Self::Error> { - let elections_bounds = ElectionBoundsBuilder::from(T::Bounds::get()) - .voters_or_lower(forced_input_voters_bounds) - .targets_or_lower(forced_input_targets_bounds) - .build(); + fn elect(page: PageIndex) -> Result, Self::Error> { + let election_bounds = ElectionBoundsBuilder::from(T::Bounds::get()).build(); + Self::elect_with(election_bounds, page) + } - elect_with_input_bounds::(elections_bounds) + fn start() -> Result<(), Self::Error> { + // noop, we are always ready! 
+ Ok(()) } -} -impl ElectionProvider for OnChainExecution { - fn ongoing() -> bool { - false + fn duration() -> Self::BlockNumber { + sp_runtime::traits::Zero::zero() } - fn elect() -> Result, Self::Error> { - let election_bounds = ElectionBoundsBuilder::from(T::Bounds::get()).build(); - elect_with_input_bounds::(election_bounds) + fn status() -> Result { + Ok(true) } } @@ -184,6 +215,7 @@ mod tests { use super::*; use crate::{ElectionProvider, PhragMMS, SequentialPhragmen}; use frame_support::{assert_noop, derive_impl, parameter_types}; + use sp_io::TestExternalities; use sp_npos_elections::Support; use sp_runtime::Perbill; type AccountId = u64; @@ -231,42 +263,50 @@ mod tests { struct PhragMMSParams; parameter_types! { - pub static MaxWinners: u32 = 10; + pub static MaxWinnersPerPage: u32 = 10; + pub static MaxBackersPerWinner: u32 = 20; pub static DesiredTargets: u32 = 2; + pub static Sort: bool = false; pub static Bounds: ElectionBounds = ElectionBoundsBuilder::default().voters_count(600.into()).targets_count(400.into()).build(); } impl Config for PhragmenParams { + type Sort = Sort; type System = Runtime; type Solver = SequentialPhragmen; type DataProvider = mock_data_provider::DataProvider; - type WeightInfo = (); - type MaxWinners = MaxWinners; + type MaxWinnersPerPage = MaxWinnersPerPage; + type MaxBackersPerWinner = MaxBackersPerWinner; type Bounds = Bounds; + type WeightInfo = (); } impl Config for PhragMMSParams { + type Sort = Sort; type System = Runtime; type Solver = PhragMMS; type DataProvider = mock_data_provider::DataProvider; + type MaxWinnersPerPage = MaxWinnersPerPage; + type MaxBackersPerWinner = MaxBackersPerWinner; type WeightInfo = (); - type MaxWinners = MaxWinners; type Bounds = Bounds; } mod mock_data_provider { + use super::*; + use crate::{data_provider, DataProviderBounds, PageIndex, VoterOf}; use frame_support::traits::ConstU32; use sp_runtime::bounded_vec; - use super::*; - use crate::{data_provider, VoterOf}; - pub struct 
DataProvider; impl ElectionDataProvider for DataProvider { type AccountId = AccountId; type BlockNumber = BlockNumber; type MaxVotesPerVoter = ConstU32<2>; - fn electing_voters(_: DataProviderBounds) -> data_provider::Result>> { + fn electing_voters( + _: DataProviderBounds, + _page: PageIndex, + ) -> data_provider::Result>> { Ok(vec![ (1, 10, bounded_vec![10, 20]), (2, 20, bounded_vec![30, 20]), @@ -274,7 +314,10 @@ mod tests { ]) } - fn electable_targets(_: DataProviderBounds) -> data_provider::Result> { + fn electable_targets( + _: DataProviderBounds, + _page: PageIndex, + ) -> data_provider::Result> { Ok(vec![10, 20, 30]) } @@ -290,40 +333,101 @@ mod tests { #[test] fn onchain_seq_phragmen_works() { - sp_io::TestExternalities::new_empty().execute_with(|| { + TestExternalities::new_empty().execute_with(|| { + let expected_supports = vec![ + ( + 10 as AccountId, + Support { total: 25, voters: vec![(1 as AccountId, 10), (3, 15)] }, + ), + (30, Support { total: 35, voters: vec![(2, 20), (3, 15)] }), + ] + .try_into() + .unwrap(); + assert_eq!( - as ElectionProvider>::elect().unwrap(), - vec![ - (10, Support { total: 25, voters: vec![(1, 10), (3, 15)] }), - (30, Support { total: 35, voters: vec![(2, 20), (3, 15)] }) - ] + as ElectionProvider>::elect(0).unwrap(), + expected_supports, ); }) } #[test] - fn too_many_winners_when_desired_targets_exceed_max_winners() { - sp_io::TestExternalities::new_empty().execute_with(|| { - // given desired targets larger than max winners - DesiredTargets::set(10); - MaxWinners::set(9); + fn sorting_false_works() { + TestExternalities::new_empty().execute_with(|| { + // Default results would have 3 targets, but we allow for only 2. 
+ DesiredTargets::set(3); + MaxWinnersPerPage::set(2); assert_noop!( - as ElectionProvider>::elect(), - Error::TooManyWinners, + as ElectionProvider>::elect(0), + Error::FailedToBound, + ); + }); + + TestExternalities::new_empty().execute_with(|| { + // Default results would have 2 backers per winner + MaxBackersPerWinner::set(1); + + assert_noop!( + as ElectionProvider>::elect(0), + Error::FailedToBound, + ); + }); + } + + #[test] + fn sorting_true_works_winners() { + Sort::set(true); + + TestExternalities::new_empty().execute_with(|| { + let expected_supports = + vec![(30, Support { total: 35, voters: vec![(2, 20), (3, 15)] })] + .try_into() + .unwrap(); + + // we want to allow 1 winner only, and allow sorting. + MaxWinnersPerPage::set(1); + + assert_eq!( + as ElectionProvider>::elect(0).unwrap(), + expected_supports, + ); + }); + + MaxWinnersPerPage::set(10); + + TestExternalities::new_empty().execute_with(|| { + let expected_supports = vec![ + (30, Support { total: 20, voters: vec![(2, 20)] }), + (10 as AccountId, Support { total: 15, voters: vec![(3 as AccountId, 15)] }), + ] + .try_into() + .unwrap(); + + // we want to allow 2 winners only but 1 backer each, and allow sorting. 
+ MaxBackersPerWinner::set(1); + + assert_eq!( + as ElectionProvider>::elect(0).unwrap(), + expected_supports, ); }) } #[test] fn onchain_phragmms_works() { - sp_io::TestExternalities::new_empty().execute_with(|| { + TestExternalities::new_empty().execute_with(|| { assert_eq!( - as ElectionProvider>::elect().unwrap(), + as ElectionProvider>::elect(0).unwrap(), vec![ - (10, Support { total: 25, voters: vec![(1, 10), (3, 15)] }), + ( + 10 as AccountId, + Support { total: 25, voters: vec![(1 as AccountId, 10), (3, 15)] } + ), (30, Support { total: 35, voters: vec![(2, 20), (3, 15)] }) ] + .try_into() + .unwrap() ); }) } diff --git a/substrate/frame/election-provider-support/src/tests.rs b/substrate/frame/election-provider-support/src/tests.rs index 6e3deb9e38346..af6206693d5ad 100644 --- a/substrate/frame/election-provider-support/src/tests.rs +++ b/substrate/frame/election-provider-support/src/tests.rs @@ -18,10 +18,12 @@ //! Tests for solution-type. #![cfg(test)] - -use crate::{mock::*, IndexAssignment, NposSolution}; +use crate::{ + mock::*, BoundedSupport, BoundedSupports, IndexAssignment, NposSolution, TryFromOtherBounds, +}; use frame_support::traits::ConstU32; use rand::SeedableRng; +use sp_npos_elections::{Support, Supports}; mod solution_type { use super::*; @@ -452,3 +454,100 @@ fn index_assignments_generate_same_solution_as_plain_assignments() { assert_eq!(solution, index_compact); } + +#[test] +fn try_from_other_bounds_works() { + let bounded: BoundedSupports, ConstU32<2>> = vec![ + (1, Support { total: 100, voters: vec![(1, 50), (2, 50)] }), + (2, Support { total: 100, voters: vec![(1, 50), (2, 50)] }), + ] + .try_into() + .unwrap(); + + // either of the bounds are smaller, won't convert + assert!(BoundedSupports::, ConstU32<2>>::try_from_other_bounds( + bounded.clone() + ) + .is_err()); + assert!(BoundedSupports::, ConstU32<1>>::try_from_other_bounds( + bounded.clone() + ) + .is_err()); + + // bounds are equal, will convert + assert!(BoundedSupports::, 
ConstU32<2>>::try_from_other_bounds( + bounded.clone() + ) + .is_ok()); + + // bounds are larger, will convert + assert!(BoundedSupports::, ConstU32<2>>::try_from_other_bounds( + bounded.clone() + ) + .is_ok()); + assert!(BoundedSupports::, ConstU32<3>>::try_from_other_bounds( + bounded.clone() + ) + .is_ok()); +} + +#[test] +fn support_sorted_truncate_from_works() { + let support = Support { total: 100, voters: vec![(1, 50), (2, 30), (3, 20)] }; + + let (bounded, backers_removed) = + BoundedSupport::>::sorted_truncate_from(support.clone()); + assert_eq!(bounded, Support { total: 50, voters: vec![(1, 50)] }.try_into().unwrap()); + assert_eq!(backers_removed, 2); + + let (bounded, backers_removed) = + BoundedSupport::>::sorted_truncate_from(support.clone()); + assert_eq!(bounded, Support { total: 80, voters: vec![(1, 50), (2, 30)] }.try_into().unwrap()); + assert_eq!(backers_removed, 1); + + let (bounded, backers_removed) = + BoundedSupport::>::sorted_truncate_from(support.clone()); + assert_eq!( + bounded, + Support { total: 100, voters: vec![(1, 50), (2, 30), (3, 20)] } + .try_into() + .unwrap() + ); + assert_eq!(backers_removed, 0); + + let (bounded, backers_removed) = + BoundedSupport::>::sorted_truncate_from(support.clone()); + assert_eq!( + bounded, + Support { total: 100, voters: vec![(1, 50), (2, 30), (3, 20)] } + .try_into() + .unwrap() + ); + assert_eq!(backers_removed, 0); +} + +#[test] +fn supports_sorted_truncate_from_works() { + let supports: Supports = vec![ + (1, Support { total: 303, voters: vec![(100, 100), (101, 101), (102, 102)] }), + (2, Support { total: 201, voters: vec![(100, 100), (101, 101)] }), + (3, Support { total: 406, voters: vec![(100, 100), (101, 101), (102, 102), (103, 103)] }), + ]; + + let (bounded, winners_removed, backers_removed) = + BoundedSupports::, ConstU32<2>>::sorted_truncate_from(supports); + // we trim 2 as it has least total support, and trim backers based on stake. 
+ assert_eq!( + bounded + .clone() + .into_iter() + .map(|(k, v)| (k, Support { total: v.total, voters: v.voters.into_inner() })) + .collect::>(), + vec![ + (3, Support { total: 205, voters: vec![(103, 103), (102, 102)] }), + (1, Support { total: 203, voters: vec![(102, 102), (101, 101)] }) + ] + ); + assert_eq!(winners_removed, 1); + assert_eq!(backers_removed, 3); +} diff --git a/substrate/frame/fast-unstake/src/mock.rs b/substrate/frame/fast-unstake/src/mock.rs index 67f7ee21e6175..3aa370a0bb12b 100644 --- a/substrate/frame/fast-unstake/src/mock.rs +++ b/substrate/frame/fast-unstake/src/mock.rs @@ -16,6 +16,7 @@ // limitations under the License. use crate::{self as fast_unstake}; +use frame_election_provider_support::PageIndex; use frame_support::{ assert_ok, derive_impl, pallet_prelude::*, @@ -82,25 +83,40 @@ parameter_types! { pub static BondingDuration: u32 = 3; pub static CurrentEra: u32 = 0; pub static Ongoing: bool = false; - pub static MaxWinners: u32 = 100; } pub struct MockElection; -impl frame_election_provider_support::ElectionProviderBase for MockElection { - type AccountId = AccountId; + +impl frame_election_provider_support::ElectionProvider for MockElection { type BlockNumber = BlockNumber; - type MaxWinners = MaxWinners; + type AccountId = AccountId; type DataProvider = Staking; + type MaxBackersPerWinner = ConstU32<100>; + type MaxWinnersPerPage = ConstU32<100>; + type Pages = ConstU32<1>; type Error = (); -} -impl frame_election_provider_support::ElectionProvider for MockElection { - fn ongoing() -> bool { - Ongoing::get() - } - fn elect() -> Result, Self::Error> { + fn elect( + _remaining_pages: PageIndex, + ) -> Result, Self::Error> { Err(()) } + + fn start() -> Result<(), Self::Error> { + Ok(()) + } + + fn duration() -> Self::BlockNumber { + 0 + } + + fn status() -> Result { + if Ongoing::get() { + Ok(false) + } else { + Err(()) + } + } } #[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)] diff --git 
a/substrate/frame/grandpa/Cargo.toml b/substrate/frame/grandpa/Cargo.toml index 4072d65b6267b..8fe651de43d99 100644 --- a/substrate/frame/grandpa/Cargo.toml +++ b/substrate/frame/grandpa/Cargo.toml @@ -42,6 +42,7 @@ pallet-staking = { workspace = true, default-features = true } pallet-staking-reward-curve = { workspace = true, default-features = true } pallet-timestamp = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/grandpa/src/mock.rs b/substrate/frame/grandpa/src/mock.rs index 6c3870a90079c..034858c70e167 100644 --- a/substrate/frame/grandpa/src/mock.rs +++ b/substrate/frame/grandpa/src/mock.rs @@ -32,7 +32,7 @@ use frame_support::{ }; use pallet_session::historical as pallet_session_historical; use sp_consensus_grandpa::{RoundNumber, SetId, GRANDPA_ENGINE_ID}; -use sp_core::H256; +use sp_core::{ConstBool, H256}; use sp_keyring::Ed25519Keyring; use sp_runtime::{ curve::PiecewiseLinear, @@ -69,7 +69,7 @@ impl_opaque_keys! { #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { type Block = Block; - type AccountData = pallet_balances::AccountData; + type AccountData = pallet_balances::AccountData; } impl frame_system::offchain::CreateTransactionBase for Test @@ -98,7 +98,7 @@ parameter_types! 
{ impl pallet_session::Config for Test { type RuntimeEvent = RuntimeEvent; type ValidatorId = u64; - type ValidatorIdOf = pallet_staking::StashOf; + type ValidatorIdOf = sp_runtime::traits::ConvertInto; type ShouldEndSession = pallet_session::PeriodicSessions, ConstU64<0>>; type NextSessionRotation = pallet_session::PeriodicSessions, ConstU64<0>>; type SessionManager = pallet_session::historical::NoteHistoricalRoot; @@ -109,8 +109,9 @@ impl pallet_session::Config for Test { } impl pallet_session::historical::Config for Test { - type FullIdentification = pallet_staking::Existence; - type FullIdentificationOf = pallet_staking::ExistenceOf; + type RuntimeEvent = RuntimeEvent; + type FullIdentification = (); + type FullIdentificationOf = pallet_staking::UnitIdentificationOf; } impl pallet_authorship::Config for Test { @@ -118,9 +119,10 @@ impl pallet_authorship::Config for Test { type EventHandler = (); } +type Balance = u128; #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { - type Balance = u128; + type Balance = Balance; type ExistentialDeposit = ConstU128<1>; type AccountStore = System; } @@ -156,7 +158,9 @@ impl onchain::Config for OnChainSeqPhragmen { type Solver = SequentialPhragmen; type DataProvider = Staking; type WeightInfo = (); - type MaxWinners = ConstU32<100>; + type MaxWinnersPerPage = ConstU32<100>; + type MaxBackersPerWinner = ConstU32<100>; + type Sort = ConstBool; type Bounds = ElectionsBoundsOnChain; } @@ -223,6 +227,7 @@ pub fn new_test_ext(vec: Vec<(u64, u64)>) -> sp_io::TestExternalities { } pub fn new_test_ext_raw_authorities(authorities: AuthorityList) -> sp_io::TestExternalities { + sp_tracing::try_init_simple(); let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); let balances: Vec<_> = (0..authorities.len()).map(|i| (i as u64, 10_000_000)).collect(); diff --git a/substrate/frame/im-online/src/mock.rs b/substrate/frame/im-online/src/mock.rs index 
4ccbde1931478..11f5e1548101f 100644 --- a/substrate/frame/im-online/src/mock.rs +++ b/substrate/frame/im-online/src/mock.rs @@ -132,6 +132,7 @@ impl pallet_session::Config for Runtime { } impl pallet_session::historical::Config for Runtime { + type RuntimeEvent = RuntimeEvent; type FullIdentification = u64; type FullIdentificationOf = ConvertInto; } diff --git a/substrate/frame/nomination-pools/benchmarking/src/mock.rs b/substrate/frame/nomination-pools/benchmarking/src/mock.rs index c707af4842977..1dcfb86b75cf6 100644 --- a/substrate/frame/nomination-pools/benchmarking/src/mock.rs +++ b/substrate/frame/nomination-pools/benchmarking/src/mock.rs @@ -85,7 +85,7 @@ impl pallet_staking::Config for Runtime { type AdminOrigin = frame_system::EnsureRoot; type EraPayout = pallet_staking::ConvertCurve; type ElectionProvider = - frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking, ())>; + frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking, (), ())>; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = VoterList; type TargetList = pallet_staking::UseValidatorsMap; diff --git a/substrate/frame/nomination-pools/src/lib.rs b/substrate/frame/nomination-pools/src/lib.rs index 863e61d29da18..6f0192eee9ac8 100644 --- a/substrate/frame/nomination-pools/src/lib.rs +++ b/substrate/frame/nomination-pools/src/lib.rs @@ -3980,6 +3980,7 @@ impl Pallet { let sum_unbonding_balance = subs.sum_unbonding_balance(); let bonded_balance = T::StakeAdapter::active_stake(Pool::from(pool_account.clone())); + // TODO: should be total_balance + unclaimed_withdrawals from delegated staking let total_balance = T::StakeAdapter::total_balance(Pool::from(pool_account.clone())) // At the time when StakeAdapter is changed to `DelegateStake` but pool is not yet // migrated, the total balance would be none. 
diff --git a/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs b/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs index 7eee16cd5a4ff..cc7ea7c029ba8 100644 --- a/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs +++ b/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs @@ -102,7 +102,7 @@ impl pallet_staking::Config for Runtime { type BondingDuration = BondingDuration; type EraPayout = pallet_staking::ConvertCurve; type ElectionProvider = - frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking, ())>; + frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking, (), ())>; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = VoterList; type TargetList = pallet_staking::UseValidatorsMap; diff --git a/substrate/frame/offences/benchmarking/Cargo.toml b/substrate/frame/offences/benchmarking/Cargo.toml index 9337871bbe805..27fc5101eff63 100644 --- a/substrate/frame/offences/benchmarking/Cargo.toml +++ b/substrate/frame/offences/benchmarking/Cargo.toml @@ -38,6 +38,7 @@ pallet-staking-reward-curve = { workspace = true, default-features = true } pallet-timestamp = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/offences/benchmarking/src/inner.rs b/substrate/frame/offences/benchmarking/src/inner.rs index 3d3cd470bc24c..cb8e3cf56d02e 100644 --- a/substrate/frame/offences/benchmarking/src/inner.rs +++ b/substrate/frame/offences/benchmarking/src/inner.rs @@ -18,17 +18,10 @@ //! Offences pallet benchmarking. 
use alloc::{vec, vec::Vec}; - +use codec::Decode; use frame_benchmarking::v2::*; use frame_support::traits::Get; use frame_system::{Config as SystemConfig, Pallet as System, RawOrigin}; - -use sp_runtime::{ - traits::{Convert, Saturating, StaticLookup}, - Perbill, -}; -use sp_staking::offence::ReportOffence; - use pallet_babe::EquivocationOffence as BabeEquivocationOffence; use pallet_balances::Config as BalancesConfig; use pallet_grandpa::{ @@ -37,12 +30,17 @@ use pallet_grandpa::{ use pallet_offences::{Config as OffencesConfig, Pallet as Offences}; use pallet_session::{ historical::{Config as HistoricalConfig, IdentificationTuple}, - Config as SessionConfig, Pallet as Session, SessionManager, + Config as SessionConfig, Pallet as Session, }; use pallet_staking::{ Config as StakingConfig, Exposure, IndividualExposure, MaxNominationsOf, Pallet as Staking, RewardDestination, ValidatorPrefs, }; +use sp_runtime::{ + traits::{Convert, Saturating, StaticLookup}, + Perbill, +}; +use sp_staking::offence::ReportOffence; const SEED: u32 = 0; @@ -51,7 +49,7 @@ const MAX_NOMINATORS: u32 = 100; pub struct Pallet(Offences); pub trait Config: - SessionConfig + SessionConfig::AccountId> + StakingConfig + OffencesConfig + HistoricalConfig @@ -109,6 +107,13 @@ fn create_offender(n: u32, nominators: u32) -> Result, &' ValidatorPrefs { commission: Perbill::from_percent(50), ..Default::default() }; Staking::::validate(RawOrigin::Signed(stash.clone()).into(), validator_prefs)?; + // set some fake keys for the validators. 
+ let keys = + ::Keys::decode(&mut sp_runtime::traits::TrailingZeroInput::zeroes()) + .unwrap(); + let proof: Vec = vec![0, 1, 2, 3]; + Session::::set_keys(RawOrigin::Signed(stash.clone()).into(), keys, proof)?; + let mut individual_exposures = vec![]; let mut nominator_stashes = vec![]; // Create n nominators @@ -145,16 +150,15 @@ fn make_offenders( num_offenders: u32, num_nominators: u32, ) -> Result>, &'static str> { - Staking::::new_session(0); - let mut offenders = vec![]; for i in 0..num_offenders { let offender = create_offender::(i + 1, num_nominators)?; + // add them to the session validators -- this is needed since `FullIdentificationOf` usually + // checks this. + pallet_session::Validators::::mutate(|v| v.push(offender.controller.clone())); offenders.push(offender); } - Staking::::start_session(0); - let id_tuples = offenders .iter() .map(|offender| { @@ -164,9 +168,17 @@ fn make_offenders( .map(|validator_id| { ::FullIdentificationOf::convert(validator_id.clone()) .map(|full_id| (validator_id, full_id)) - .expect("failed to convert validator id to full identification") + .unwrap() }) .collect::>>(); + + if pallet_staking::ActiveEra::::get().is_none() { + pallet_staking::ActiveEra::::put(pallet_staking::ActiveEraInfo { + index: 0, + start: Some(0), + }); + } + Ok(id_tuples) } @@ -182,7 +194,7 @@ where // make sure that all slashes have been applied // deposit to reporter + reporter account endowed. 
assert_eq!(System::::read_events_for_pallet::>().len(), 2); - // (n nominators + one validator) * slashed + Slash Reported + // (n nominators + one validator) * slashed + Slash Reported + Slash Computed assert_eq!( System::::read_events_for_pallet::>().len(), 1 * (offender_count + 1) as usize + 1 diff --git a/substrate/frame/offences/benchmarking/src/mock.rs b/substrate/frame/offences/benchmarking/src/mock.rs index daa0b6c85bba9..031cf81cef987 100644 --- a/substrate/frame/offences/benchmarking/src/mock.rs +++ b/substrate/frame/offences/benchmarking/src/mock.rs @@ -53,8 +53,9 @@ impl pallet_timestamp::Config for Test { type WeightInfo = (); } impl pallet_session::historical::Config for Test { - type FullIdentification = pallet_staking::Existence; - type FullIdentificationOf = pallet_staking::ExistenceOf; + type RuntimeEvent = RuntimeEvent; + type FullIdentification = (); + type FullIdentificationOf = pallet_staking::UnitIdentificationOf; } sp_runtime::impl_opaque_keys! { @@ -93,7 +94,7 @@ impl pallet_session::Config for Test { type SessionHandler = TestSessionHandler; type RuntimeEvent = RuntimeEvent; type ValidatorId = AccountId; - type ValidatorIdOf = pallet_staking::StashOf; + type ValidatorIdOf = sp_runtime::traits::ConvertInto; type DisablingStrategy = (); type WeightInfo = (); } @@ -111,6 +112,7 @@ pallet_staking_reward_curve::build! { parameter_types! 
{ pub const RewardCurve: &'static sp_runtime::curve::PiecewiseLinear<'static> = &I_NPOS; pub static ElectionsBounds: ElectionBounds = ElectionBoundsBuilder::default().build(); + pub const Sort: bool = true; } pub struct OnChainSeqPhragmen; @@ -119,7 +121,9 @@ impl onchain::Config for OnChainSeqPhragmen { type Solver = SequentialPhragmen; type DataProvider = Staking; type WeightInfo = (); - type MaxWinners = ConstU32<100>; + type MaxWinnersPerPage = ConstU32<100>; + type MaxBackersPerWinner = ConstU32<100>; + type Sort = Sort; type Bounds = ElectionsBounds; } @@ -188,11 +192,12 @@ frame_support::construct_runtime!( Session: pallet_session, ImOnline: pallet_im_online::{Pallet, Call, Storage, Event, ValidateUnsigned, Config}, Offences: pallet_offences::{Pallet, Storage, Event}, - Historical: pallet_session_historical::{Pallet}, + Historical: pallet_session_historical::{Pallet, Event}, } ); pub fn new_test_ext() -> sp_io::TestExternalities { + sp_tracing::try_init_simple(); let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); sp_io::TestExternalities::new(t) } diff --git a/substrate/frame/root-offences/Cargo.toml b/substrate/frame/root-offences/Cargo.toml index e313a447f0f36..5a28ae7b9697e 100644 --- a/substrate/frame/root-offences/Cargo.toml +++ b/substrate/frame/root-offences/Cargo.toml @@ -24,6 +24,7 @@ pallet-staking = { workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } +sp-core = { workspace = true } sp-runtime = { workspace = true } sp-staking = { workspace = true } @@ -48,6 +49,7 @@ std = [ "pallet-staking/std", "pallet-timestamp/std", "scale-info/std", + "sp-core/std", "sp-io/std", "sp-runtime/std", "sp-staking/std", diff --git a/substrate/frame/root-offences/src/lib.rs b/substrate/frame/root-offences/src/lib.rs index 5aad1cb5870ac..c0a6187e837f1 100644 --- a/substrate/frame/root-offences/src/lib.rs +++ b/substrate/frame/root-offences/src/lib.rs @@ -30,31 +30,29 @@ mod tests; extern crate alloc; 
use alloc::vec::Vec; +pub use pallet::*; use pallet_session::historical::IdentificationTuple; -use pallet_staking::Pallet as Staking; -use sp_runtime::Perbill; +use sp_runtime::{traits::Convert, Perbill}; use sp_staking::offence::OnOffenceHandler; -pub use pallet::*; - #[frame_support::pallet] pub mod pallet { use super::*; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; + use sp_staking::SessionIndex; #[pallet::config] pub trait Config: frame_system::Config + pallet_staking::Config + pallet_session::Config::AccountId> - + pallet_session::historical::Config< - FullIdentification = pallet_staking::Existence, - FullIdentificationOf = pallet_staking::ExistenceOf, - > + + pallet_session::historical::Config { #[allow(deprecated)] type RuntimeEvent: From> + IsType<::RuntimeEvent>; + /// The offence handler provided by the runtime. + type OffenceHandler: OnOffenceHandler, Weight>; } #[pallet::pallet] @@ -81,19 +79,40 @@ pub mod pallet { #[pallet::call] impl Pallet { /// Allows the `root`, for example sudo to create an offence. + /// + /// If `identifications` is `Some`, then the given identification is used for offence. Else, + /// it is fetched live from `session::Historical`. 
#[pallet::call_index(0)] #[pallet::weight(T::DbWeight::get().reads(2))] pub fn create_offence( origin: OriginFor, offenders: Vec<(T::AccountId, Perbill)>, + maybe_identifications: Option>, + maybe_session_index: Option, ) -> DispatchResult { ensure_root(origin)?; + ensure!( + maybe_identifications.as_ref().map_or(true, |ids| ids.len() == offenders.len()), + "InvalidIdentificationLength" + ); + + let identifications = + maybe_identifications.ok_or("Unreachable-NoIdentification").or_else(|_| { + offenders + .iter() + .map(|(who, _)| { + T::FullIdentificationOf::convert(who.clone()) + .ok_or("failed to call FullIdentificationOf") + }) + .collect::, _>>() + })?; + let slash_fraction = offenders.clone().into_iter().map(|(_, fraction)| fraction).collect::>(); - let offence_details = Self::get_offence_details(offenders.clone())?; + let offence_details = Self::get_offence_details(offenders.clone(), identifications)?; - Self::submit_offence(&offence_details, &slash_fraction); + Self::submit_offence(&offence_details, &slash_fraction, maybe_session_index); Self::deposit_event(Event::OffenceCreated { offenders }); Ok(()) } @@ -103,26 +122,31 @@ pub mod pallet { /// Returns a vector of offenders that are going to be slashed. fn get_offence_details( offenders: Vec<(T::AccountId, Perbill)>, + identifications: Vec, ) -> Result>, DispatchError> { Ok(offenders .clone() .into_iter() - .map(|(o, _)| OffenceDetails:: { - offender: (o.clone(), ()), + .zip(identifications.into_iter()) + .map(|((o, _), i)| OffenceDetails:: { + offender: (o.clone(), i), reporters: Default::default(), }) .collect()) } /// Submits the offence by calling the `on_offence` function. 
- fn submit_offence(offenders: &[OffenceDetails], slash_fraction: &[Perbill]) { - let session_index = as frame_support::traits::ValidatorSet>::session_index(); - - as OnOffenceHandler< - T::AccountId, - IdentificationTuple, - Weight, - >>::on_offence(&offenders, &slash_fraction, session_index); + fn submit_offence( + offenders: &[OffenceDetails], + slash_fraction: &[Perbill], + maybe_session_index: Option, + ) { + let session_index = maybe_session_index.unwrap_or_else(|| { + as frame_support::traits::ValidatorSet< + T::AccountId, + >>::session_index() + }); + T::OffenceHandler::on_offence(&offenders, &slash_fraction, session_index); } } } diff --git a/substrate/frame/root-offences/src/mock.rs b/substrate/frame/root-offences/src/mock.rs index cd409dfd4ec9b..ffe4367e8b810 100644 --- a/substrate/frame/root-offences/src/mock.rs +++ b/substrate/frame/root-offences/src/mock.rs @@ -25,7 +25,7 @@ use frame_election_provider_support::{ }; use frame_support::{ derive_impl, parameter_types, - traits::{ConstU32, ConstU64, OneSessionHandler}, + traits::{ConstBool, ConstU32, ConstU64, OneSessionHandler}, }; use pallet_staking::{BalanceOf, StakerStatus}; use sp_runtime::{curve::PiecewiseLinear, testing::UintAuthorityId, traits::Zero, BuildStorage}; @@ -110,7 +110,9 @@ impl onchain::Config for OnChainSeqPhragmen { type Solver = SequentialPhragmen; type DataProvider = Staking; type WeightInfo = (); - type MaxWinners = ConstU32<100>; + type MaxWinnersPerPage = ConstU32<100>; + type MaxBackersPerWinner = ConstU32<100>; + type Sort = ConstBool; type Bounds = ElectionsBounds; } @@ -144,8 +146,9 @@ impl pallet_staking::Config for Test { } impl pallet_session::historical::Config for Test { - type FullIdentification = pallet_staking::Existence; - type FullIdentificationOf = pallet_staking::ExistenceOf; + type RuntimeEvent = RuntimeEvent; + type FullIdentification = (); + type FullIdentificationOf = pallet_staking::UnitIdentificationOf; } sp_runtime::impl_opaque_keys! 
{ @@ -161,7 +164,7 @@ impl pallet_session::Config for Test { type SessionHandler = (OtherSessionHandler,); type RuntimeEvent = RuntimeEvent; type ValidatorId = AccountId; - type ValidatorIdOf = pallet_staking::StashOf; + type ValidatorIdOf = sp_runtime::traits::ConvertInto; type NextSessionRotation = pallet_session::PeriodicSessions; type DisablingStrategy = (); type WeightInfo = (); @@ -176,6 +179,7 @@ impl pallet_timestamp::Config for Test { impl Config for Test { type RuntimeEvent = RuntimeEvent; + type OffenceHandler = Staking; } pub struct ExtBuilder { @@ -294,6 +298,11 @@ pub(crate) fn run_to_block(n: BlockNumber) { ); } +/// Progress by n block. +pub(crate) fn advance_blocks(n: u64) { + run_to_block(System::block_number() + n); +} + pub(crate) fn active_era() -> EraIndex { pallet_staking::ActiveEra::::get().unwrap().index } diff --git a/substrate/frame/root-offences/src/tests.rs b/substrate/frame/root-offences/src/tests.rs index 289bb708efbbc..9a299f173abd8 100644 --- a/substrate/frame/root-offences/src/tests.rs +++ b/substrate/frame/root-offences/src/tests.rs @@ -16,8 +16,11 @@ // limitations under the License. 
use super::*; -use frame_support::{assert_err, assert_ok}; -use mock::{active_era, start_session, ExtBuilder, RootOffences, RuntimeOrigin, System, Test as T}; +use frame_support::{assert_err, assert_noop, assert_ok}; +use mock::{ + active_era, advance_blocks, start_session, ExtBuilder, RootOffences, RuntimeOrigin, System, + Test as T, +}; use pallet_staking::asset; #[test] @@ -25,7 +28,10 @@ fn create_offence_fails_given_signed_origin() { use sp_runtime::traits::BadOrigin; ExtBuilder::default().build_and_execute(|| { let offenders = (&[]).to_vec(); - assert_err!(RootOffences::create_offence(RuntimeOrigin::signed(1), offenders), BadOrigin); + assert_err!( + RootOffences::create_offence(RuntimeOrigin::signed(1), offenders, None, None), + BadOrigin + ); }) } @@ -39,9 +45,18 @@ fn create_offence_works_given_root_origin() { assert_eq!(asset::staked::(&11), 1000); let offenders = [(11, Perbill::from_percent(50))].to_vec(); - assert_ok!(RootOffences::create_offence(RuntimeOrigin::root(), offenders.clone())); + assert_ok!(RootOffences::create_offence( + RuntimeOrigin::root(), + offenders.clone(), + None, + None + )); System::assert_last_event(Event::OffenceCreated { offenders }.into()); + + // offence is processed in the following block. + advance_blocks(1); + // the slash should be applied right away. assert_eq!(asset::staked::(&11), 500); @@ -58,19 +73,13 @@ fn create_offence_wont_slash_non_active_validators() { assert_eq!(active_era(), 0); - // 31 is not an active validator. - assert_eq!(asset::staked::(&31), 500); - + // we cannot even submit an offence for this, because we cannot generate an identification + // for them. let offenders = [(31, Perbill::from_percent(20)), (11, Perbill::from_percent(20))].to_vec(); - assert_ok!(RootOffences::create_offence(RuntimeOrigin::root(), offenders.clone())); - - System::assert_last_event(Event::OffenceCreated { offenders }.into()); - - // so 31 didn't get slashed. 
- assert_eq!(asset::staked::(&31), 500); - - // but 11 is an active validator so they got slashed. - assert_eq!(asset::staked::(&11), 800); + assert_noop!( + RootOffences::create_offence(RuntimeOrigin::root(), offenders.clone(), None, None), + "failed to call FullIdentificationOf" + ); }) } @@ -84,12 +93,12 @@ fn create_offence_wont_slash_idle() { // 41 is idle. assert_eq!(asset::staked::(&41), 1000); + // we cannot even submit an offence for this, because we cannot generate an identification + // for them. let offenders = [(41, Perbill::from_percent(50))].to_vec(); - assert_ok!(RootOffences::create_offence(RuntimeOrigin::root(), offenders.clone())); - - System::assert_last_event(Event::OffenceCreated { offenders }.into()); - - // 41 didn't get slashed. - assert_eq!(asset::staked::(&41), 1000); + assert_noop!( + RootOffences::create_offence(RuntimeOrigin::root(), offenders.clone(), None, None), + "failed to call FullIdentificationOf" + ); }) } diff --git a/substrate/frame/session/benchmarking/Cargo.toml b/substrate/frame/session/benchmarking/Cargo.toml index f65d889c9c508..1fa7e76bc29b7 100644 --- a/substrate/frame/session/benchmarking/Cargo.toml +++ b/substrate/frame/session/benchmarking/Cargo.toml @@ -34,6 +34,7 @@ pallet-staking-reward-curve = { workspace = true, default-features = true } pallet-timestamp = { workspace = true, default-features = true } scale-info = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } +sp-staking = { workspace = true } [features] default = ["std"] @@ -47,6 +48,7 @@ std = [ "rand/std", "sp-runtime/std", "sp-session/std", + "sp-staking/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", @@ -57,4 +59,5 @@ runtime-benchmarks = [ "pallet-staking/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "sp-runtime/runtime-benchmarks", + "sp-staking/runtime-benchmarks", ] diff --git a/substrate/frame/session/benchmarking/src/inner.rs 
b/substrate/frame/session/benchmarking/src/inner.rs index 9789b6bb593d0..f3707763f3509 100644 --- a/substrate/frame/session/benchmarking/src/inner.rs +++ b/substrate/frame/session/benchmarking/src/inner.rs @@ -163,6 +163,8 @@ fn check_membership_proof_setup( keys }; + // TODO: this benchmark is broken, session keys cannot be decoded into 128 bytes anymore, + // but not an issue for CI since it is `extra`. let keys: T::Keys = Decode::decode(&mut &keys[..]).unwrap(); let proof: Vec = vec![]; diff --git a/substrate/frame/session/benchmarking/src/mock.rs b/substrate/frame/session/benchmarking/src/mock.rs index 40e7215ccc307..d4003004628e5 100644 --- a/substrate/frame/session/benchmarking/src/mock.rs +++ b/substrate/frame/session/benchmarking/src/mock.rs @@ -67,8 +67,9 @@ impl pallet_timestamp::Config for Test { type WeightInfo = (); } impl pallet_session::historical::Config for Test { - type FullIdentification = pallet_staking::Existence; - type FullIdentificationOf = pallet_staking::ExistenceOf; + type RuntimeEvent = RuntimeEvent; + type FullIdentification = (); + type FullIdentificationOf = pallet_staking::UnitIdentificationOf; } sp_runtime::impl_opaque_keys! { @@ -102,7 +103,7 @@ impl pallet_session::Config for Test { type SessionHandler = TestSessionHandler; type RuntimeEvent = RuntimeEvent; type ValidatorId = AccountId; - type ValidatorIdOf = pallet_staking::StashOf; + type ValidatorIdOf = sp_runtime::traits::ConvertInto; type DisablingStrategy = (); type WeightInfo = (); } @@ -119,6 +120,7 @@ pallet_staking_reward_curve::build! { parameter_types! 
{ pub const RewardCurve: &'static sp_runtime::curve::PiecewiseLinear<'static> = &I_NPOS; pub static ElectionsBounds: ElectionBounds = ElectionBoundsBuilder::default().build(); + pub const Sort: bool = true; } pub struct OnChainSeqPhragmen; @@ -127,7 +129,9 @@ impl onchain::Config for OnChainSeqPhragmen { type Solver = SequentialPhragmen; type DataProvider = Staking; type WeightInfo = (); - type MaxWinners = ConstU32<100>; + type MaxWinnersPerPage = ConstU32<100>; + type MaxBackersPerWinner = ConstU32<100>; + type Sort = Sort; type Bounds = ElectionsBounds; } diff --git a/substrate/frame/session/src/historical/mod.rs b/substrate/frame/session/src/historical/mod.rs index 0e5ba1996712d..bb778908c952f 100644 --- a/substrate/frame/session/src/historical/mod.rs +++ b/substrate/frame/session/src/historical/mod.rs @@ -47,9 +47,11 @@ use sp_trie::{ use frame_support::{ print, traits::{KeyOwnerProofSystem, ValidatorSet, ValidatorSetWithIdentification}, - Parameter, LOG_TARGET, + Parameter, }; +const LOG_TARGET: &'static str = "runtime::historical"; + use crate::{self as pallet_session, Pallet as Session}; pub use pallet::*; @@ -70,6 +72,10 @@ pub mod pallet { /// Config necessary for the historical pallet. #[pallet::config] pub trait Config: pallet_session::Config + frame_system::Config { + /// The overarching event type. + #[allow(deprecated)] + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + /// Full identification of the validator. type FullIdentification: Parameter; @@ -92,6 +98,15 @@ pub mod pallet { /// The range of historical sessions we store. 
[first, last) #[pallet::storage] pub type StoredRange = StorageValue<_, (SessionIndex, SessionIndex), OptionQuery>; + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// The merkle root of the validators of the said session were stored + RootStored { index: SessionIndex }, + /// The merkle roots of up to this session index were pruned + RootsPruned { up_to: SessionIndex }, + } } impl Pallet { @@ -118,7 +133,9 @@ impl Pallet { } else { Some((new_start, end)) } - }) + }); + + Self::deposit_event(Event::::RootsPruned { up_to }); } fn full_id_validators() -> Vec<(T::ValidatorId, T::FullIdentification)> { @@ -189,7 +206,10 @@ impl> NoteHi if let Some(new_validators) = new_validators_and_id { let count = new_validators.len() as ValidatorCount; match ProvingTrie::::generate_for(new_validators) { - Ok(trie) => >::insert(new_index, &(trie.root, count)), + Ok(trie) => { + >::insert(new_index, &(trie.root, count)); + Pallet::::deposit_event(Event::RootStored { index: new_index }); + }, Err(reason) => { print("Failed to generate historical ancestry-inclusion proof."); print(reason); @@ -199,6 +219,7 @@ impl> NoteHi let previous_index = new_index.saturating_sub(1); if let Some(previous_session) = >::get(previous_index) { >::insert(new_index, previous_session); + Pallet::::deposit_event(Event::RootStored { index: new_index }); } } @@ -255,7 +276,7 @@ impl ProvingTrie { Some(k) => k, }; - let full_id = (validator, full_id); + let id_tuple = (validator, full_id); // map each key to the owner index. for key_id in T::Keys::key_ids() { @@ -267,7 +288,7 @@ impl ProvingTrie { } // map each owner index to the full identification. 
- i.using_encoded(|k| full_id.using_encoded(|v| trie.insert(k, v))) + i.using_encoded(|k| id_tuple.using_encoded(|v| trie.insert(k, v))) .map_err(|_| "failed to insert into trie")?; } } @@ -347,13 +368,14 @@ impl> KeyOwnerProofSystem<(KeyTypeId, D)> for Pallet>::current_index() { let validators = Self::full_id_validators(); let count = validators.len() as ValidatorCount; - let trie = ProvingTrie::::generate_for(validators).ok()?; + let trie = ProvingTrie::::generate_for(validators).map_err(print_error).ok()?; (trie.root, count) } else { >::get(&proof.session)? }; if count != proof.validator_count { + print_error("InvalidCount"); return None } diff --git a/substrate/frame/session/src/lib.rs b/substrate/frame/session/src/lib.rs index 37623cecfa671..45ef1ac4f100c 100644 --- a/substrate/frame/session/src/lib.rs +++ b/substrate/frame/session/src/lib.rs @@ -558,6 +558,9 @@ pub mod pallet { /// New session has happened. Note that the argument is the session index, not the /// block number as the type might suggest. NewSession { session_index: SessionIndex }, + /// The `NewSession` event in the current block also implies a new validator set to be + /// queued. + NewQueued, /// Validator has been disabled. ValidatorDisabled { validator: T::ValidatorId }, /// Validator has been re-enabled. @@ -686,7 +689,7 @@ impl Pallet { if changed { log!(trace, "resetting disabled validators"); // reset disabled validators if active set was changed - DisabledValidators::::take(); + DisabledValidators::::kill(); } // Increment session index. @@ -708,6 +711,7 @@ impl Pallet { // NOTE: as per the documentation on `OnSessionEnding`, we consider // the validator set as having changed even if the validators are the // same as before, as underlying economic conditions may have changed. 
+ Self::deposit_event(Event::::NewQueued); (validators, true) } else { (Validators::::get(), false) @@ -733,14 +737,19 @@ impl Pallet { } } }; - let queued_amalgamated = next_validators - .into_iter() - .filter_map(|a| { - let k = Self::load_keys(&a)?; - check_next_changed(&k); - Some((a, k)) - }) - .collect::>(); + let queued_amalgamated = + next_validators + .into_iter() + .filter_map(|a| { + let k = + Self::load_keys(&a).or_else(|| { + log!(warn, "failed to load session key for {:?}, skipping for next session, maybe you need to set session keys for them?", a); + None + })?; + check_next_changed(&k); + Some((a, k)) + }) + .collect::>(); (queued_amalgamated, changed) }; @@ -881,7 +890,7 @@ impl Pallet { Ok(()) } - fn load_keys(v: &T::ValidatorId) -> Option { + pub fn load_keys(v: &T::ValidatorId) -> Option { NextKeys::::get(v) } @@ -937,7 +946,7 @@ impl Pallet { Err(index) => { log!(trace, "disabling validator {:?}", i); Self::deposit_event(Event::ValidatorDisabled { - validator: Validators::::get()[index as usize].clone(), + validator: Validators::::get()[i as usize].clone(), }); disabled.insert(index, (i, severity)); T::SessionHandler::on_disabled(i); @@ -954,19 +963,6 @@ impl Pallet { Self::disable_index_with_severity(i, default_severity) } - /// Disable the validator identified by `c`. (If using with the staking pallet, - /// this would be their *stash* account.) - /// - /// Returns `false` either if the validator could not be found or it was already - /// disabled. - pub fn disable(c: &T::ValidatorId) -> bool { - Validators::::get() - .iter() - .position(|i| i == c) - .map(|i| Self::disable_index(i as u32)) - .unwrap_or(false) - } - /// Re-enable the validator of index `i`, returns `false` if the validator was not disabled. 
pub fn reenable_index(i: u32) -> bool { if i >= Validators::::decode_len().defensive_unwrap_or(0) as u32 { @@ -977,7 +973,7 @@ impl Pallet { if let Ok(index) = disabled.binary_search_by_key(&i, |(index, _)| *index) { log!(trace, "reenabling validator {:?}", i); Self::deposit_event(Event::ValidatorReenabled { - validator: Validators::::get()[index as usize].clone(), + validator: Validators::::get()[i as usize].clone(), }); disabled.remove(index); return true; @@ -995,9 +991,15 @@ impl Pallet { /// Report an offence for the given validator and let disabling strategy decide /// what changes to disabled validators should be made. pub fn report_offence(validator: T::ValidatorId, severity: OffenceSeverity) { - log!(trace, "reporting offence for {:?} with {:?}", validator, severity); let decision = T::DisablingStrategy::decision(&validator, severity, &DisabledValidators::::get()); + log!( + debug, + "reporting offence for {:?} with {:?}, decision: {:?}", + validator, + severity, + decision + ); // Disable if let Some(offender_idx) = decision.disable { diff --git a/substrate/frame/session/src/mock.rs b/substrate/frame/session/src/mock.rs index ac8f9d320d75a..1a5fcdc9b6579 100644 --- a/substrate/frame/session/src/mock.rs +++ b/substrate/frame/session/src/mock.rs @@ -271,6 +271,7 @@ impl Config for Test { #[cfg(feature = "historical")] impl crate::historical::Config for Test { + type RuntimeEvent = RuntimeEvent; type FullIdentification = u64; type FullIdentificationOf = sp_runtime::traits::ConvertInto; } diff --git a/substrate/frame/staking-async/Cargo.toml b/substrate/frame/staking-async/Cargo.toml new file mode 100644 index 0000000000000..cdd9ca4c3fab8 --- /dev/null +++ b/substrate/frame/staking-async/Cargo.toml @@ -0,0 +1,91 @@ +[package] +name = "pallet-staking-async" +version = "0.1.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true +description = "FRAME pallet staking async" + +[lints] 
+workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { features = ["derive"], workspace = true } +frame-election-provider-support = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +log = { workspace = true } +pallet-staking-async-rc-client = { workspace = true } +rand = { features = ["alloc"], workspace = true } +rand_chacha = { workspace = true } +scale-info = { features = ["derive", "serde"], workspace = true } +serde = { features = ["alloc", "derive"], workspace = true } +sp-application-crypto = { features = ["serde"], workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { features = ["serde"], workspace = true } +sp-staking = { features = ["serde"], workspace = true } + +# Optional imports for benchmarking +frame-benchmarking = { optional = true, workspace = true } + +[dev-dependencies] +frame-benchmarking = { workspace = true, default-features = true } +frame-support = { features = ["experimental"], workspace = true, default-features = true } +pallet-bags-list = { workspace = true, default-features = true } +pallet-balances = { workspace = true, default-features = true } +rand_chacha = { workspace = true, default-features = true } +sp-npos-elections = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } +substrate-test-utils = { workspace = true } + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-benchmarking?/std", + "frame-election-provider-support/std", + "frame-support/std", + "frame-system/std", + "log/std", + "pallet-bags-list/std", + "pallet-balances/std", + "pallet-staking-async-rc-client/std", + "rand/std", + "rand_chacha/std", + "scale-info/std", + "serde/std", + "sp-application-crypto/std", + "sp-core/std", + "sp-core/std", + "sp-io/std", + "sp-npos-elections/std", + "sp-runtime/std", + "sp-staking/std", + "sp-tracing/std", +] 
+runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-election-provider-support/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-bags-list/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-staking-async-rc-client/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", + "sp-staking/runtime-benchmarks", +] +try-runtime = [ + "frame-election-provider-support/try-runtime", + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-bags-list/try-runtime", + "pallet-balances/try-runtime", + "pallet-staking-async-rc-client/try-runtime", + "sp-runtime/try-runtime", +] diff --git a/substrate/frame/staking-async/ah-client/Cargo.toml b/substrate/frame/staking-async/ah-client/Cargo.toml new file mode 100644 index 0000000000000..2962d76660408 --- /dev/null +++ b/substrate/frame/staking-async/ah-client/Cargo.toml @@ -0,0 +1,57 @@ +[package] +name = "pallet-staking-async-ah-client" +description = "Pallet handling the communication with staking-rc-client. It's role is to glue the staking pallet (on AssetHub chain) and session pallet (on Relay Chain) in a transparent way." 
+license = "Apache-2.0" +version = "0.1.0" +edition.workspace = true +authors.workspace = true +repository.workspace = true + +[dependencies] +codec = { workspace = true, features = ["derive"] } +log = { workspace = true } +scale-info = { workspace = true, features = ["derive"] } +serde = { features = ["alloc", "derive"], workspace = true } + +sp-core = { workspace = true } +sp-runtime = { workspace = true } +sp-staking = { workspace = true } + +frame-support = { workspace = true } +frame-system = { workspace = true } + +pallet-authorship = { workspace = true } +pallet-session = { features = ["historical"], workspace = true } +pallet-staking-async-rc-client = { workspace = true } + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-support/std", + "frame-system/std", + "log/std", + "pallet-authorship/std", + "pallet-session/std", + "pallet-staking-async-rc-client/std", + "scale-info/std", + "serde/std", + "sp-core/std", + "sp-runtime/std", + "sp-staking/std", +] +runtime-benchmarks = [ + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-staking-async-rc-client/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", + "sp-staking/runtime-benchmarks", +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-authorship/try-runtime", + "pallet-session/try-runtime", + "pallet-staking-async-rc-client/try-runtime", + "sp-runtime/try-runtime", +] diff --git a/substrate/frame/staking-async/ah-client/src/lib.rs b/substrate/frame/staking-async/ah-client/src/lib.rs new file mode 100644 index 0000000000000..416d1d12dec57 --- /dev/null +++ b/substrate/frame/staking-async/ah-client/src/lib.rs @@ -0,0 +1,753 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The client for AssetHub, intended to be used in the relay chain. +//! +//! The counter-part for this pallet is `pallet-staking-async-rc-client` on AssetHub. +//! +//! This documentation is divided into the following sections: +//! +//! 1. Incoming messages: the messages that we receive from the relay chian. +//! 2. Outgoing messages: the messaged that we sent to the relay chain. +//! 3. Local interfaces: the interfaces that we expose to other pallets in the runtime. +//! +//! ## Incoming Messages +//! +//! All incoming messages are handled via [`Call`]. They are all gated to be dispatched only by +//! [`Config::AssetHubOrigin`]. The only one is: +//! +//! * [`Call::validator_set`]: A new validator set for a planning session index. +//! +//! ## Outgoing Messages +//! +//! All outgoing messages are handled by a single trait [`SendToAssetHub`]. They match the +//! incoming messages of the `ah-client` pallet. +//! +//! ## Local Interfaces: +//! +//! Living on the relay chain, this pallet must: +//! +//! * Implement [`pallet_session::SessionManager`] (and historical variant thereof) to _give_ +//! information to the session pallet. +//! * Implements [`SessionInterface`] to _receive_ information from the session pallet +//! * Implement [`sp_staking::offence::OnOffenceHandler`]. +//! * Implement reward related APIs ([`frame_support::traits::RewardsReporter`]). +//! +//! ## Future Plans +//! +//! * Governance functions to force set validators. 
+ +#![cfg_attr(not(feature = "std"), no_std)] + +pub use pallet::*; + +extern crate alloc; +use alloc::vec::Vec; +use frame_support::{pallet_prelude::*, traits::RewardsReporter}; +use pallet_staking_async_rc_client::{self as rc_client}; +use sp_staking::{ + offence::{OffenceDetails, OffenceSeverity}, + SessionIndex, +}; + +/// The balance type seen from this pallet's PoV. +pub type BalanceOf = ::CurrencyBalance; + +const LOG_TARGET: &str = "runtime::staking-async::ah-client"; + +// syntactic sugar for logging. +#[macro_export] +macro_rules! log { + ($level:tt, $patter:expr $(, $values:expr)* $(,)?) => { + log::$level!( + target: $crate::LOG_TARGET, + concat!("[{:?}] ⬇️ ", $patter), >::block_number() $(, $values)* + ) + }; +} + +/// The interface to communicate to asset hub. +/// +/// This trait should only encapsulate our outgoing communications. Any incoming message is handled +/// with `Call`s. +/// +/// In a real runtime, this is implemented via XCM calls, much like how the coretime pallet works. +/// In a test runtime, it can be wired to direct function call. +pub trait SendToAssetHub { + /// The validator account ids. + type AccountId; + + /// Report a session change to AssetHub. + fn relay_session_report(session_report: rc_client::SessionReport); + + /// Report new offences. + fn relay_new_offence( + session_index: SessionIndex, + offences: Vec>, + ); +} + +/// A no-op implementation of [`SendToAssetHub`]. +#[cfg(feature = "std")] +impl SendToAssetHub for () { + type AccountId = u64; + + fn relay_session_report(_session_report: rc_client::SessionReport) { + panic!("relay_session_report not implemented"); + } + + fn relay_new_offence( + _session_index: SessionIndex, + _offences: Vec>, + ) { + panic!("relay_new_offence not implemented"); + } +} + +/// Interface to talk to the local session pallet. 
+pub trait SessionInterface { + /// The validator id type of the session pallet + type ValidatorId: Clone; + + fn validators() -> Vec; + + /// prune up to the given session index. + fn prune_up_to(index: SessionIndex); + + /// Report an offence. + /// + /// This is used to disable validators directly on the RC, until the next validator set. + fn report_offence(offender: Self::ValidatorId, severity: OffenceSeverity); +} + +impl SessionInterface + for T +{ + type ValidatorId = ::ValidatorId; + + fn validators() -> Vec { + pallet_session::Pallet::::validators() + } + + fn prune_up_to(index: SessionIndex) { + pallet_session::historical::Pallet::::prune_up_to(index) + } + fn report_offence(offender: Self::ValidatorId, severity: OffenceSeverity) { + pallet_session::Pallet::::report_offence(offender, severity) + } +} + +/// Represents the operating mode of the pallet. +#[derive( + Default, + DecodeWithMemTracking, + Encode, + Decode, + MaxEncodedLen, + TypeInfo, + Clone, + PartialEq, + Eq, + RuntimeDebug, + serde::Serialize, + serde::Deserialize, +)] +pub enum OperatingMode { + /// Fully delegated mode. + /// + /// In this mode, the pallet performs no core logic and forwards all relevant operations + /// to the fallback implementation defined in the pallet's `Config::Fallback`. + /// + /// This mode is useful when staking is in synchronous mode and waiting for the signal to + /// transition to asynchronous mode. + #[default] + Passive, + + /// Buffered mode for deferred execution. + /// + /// In this mode, offences are accepted and buffered for later transmission to AssetHub. + /// However, session change reports are dropped. + /// + /// This mode is useful when the counterpart pallet `pallet-staking-async-rc-client` on + /// AssetHub is not yet ready to process incoming messages. + Buffered, + + /// Fully active mode. + /// + /// The pallet performs all core logic directly and handles messages immediately. 
+ /// + /// This mode is useful when staking is ready to execute in asynchronous mode and the + /// counterpart pallet `pallet-staking-async-rc-client` is ready to accept messages. + Active, +} + +impl OperatingMode { + fn can_accept_validator_set(&self) -> bool { + matches!(self, OperatingMode::Active) + } +} + +/// See `pallet_staking::DefaultExposureOf`. This type is the same, except it is duplicated here so +/// that an rc-runtime can use it after `pallet-staking` is fully removed as a dependency. +pub struct DefaultExposureOf(core::marker::PhantomData); + +impl + sp_runtime::traits::Convert< + T::AccountId, + Option>>, + > for DefaultExposureOf +{ + fn convert( + validator: T::AccountId, + ) -> Option>> { + T::SessionInterface::validators() + .contains(&validator) + .then_some(Default::default()) + } +} + +#[frame_support::pallet] +pub mod pallet { + use crate::*; + use alloc::vec; + use frame_support::traits::UnixTime; + use frame_system::pallet_prelude::*; + use pallet_session::historical; + use sp_runtime::{Perbill, Saturating}; + use sp_staking::{ + offence::{OffenceSeverity, OnOffenceHandler}, + SessionIndex, + }; + + const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); + + #[pallet::config] + pub trait Config: frame_system::Config { + /// The balance type of the runtime's currency interface. + type CurrencyBalance: sp_runtime::traits::AtLeast32BitUnsigned + + codec::FullCodec + + DecodeWithMemTracking + + codec::HasCompact + + Copy + + MaybeSerializeDeserialize + + core::fmt::Debug + + Default + + From + + TypeInfo + + Send + + Sync + + MaxEncodedLen; + + /// An origin type that ensures an incoming message is from asset hub. + type AssetHubOrigin: EnsureOrigin; + + /// The origin that can control this pallet's operations. + type AdminOrigin: EnsureOrigin; + + /// Our communication interface to AssetHub. + type SendToAssetHub: SendToAssetHub; + + /// A safety measure that asserts an incoming validator set must be at least this large. 
+ type MinimumValidatorSetSize: Get; + + /// A type that gives us a reliable unix timestamp. + type UnixTime: UnixTime; + + /// Number of points to award a validator per block authored. + type PointsPerBlock: Get; + + /// Interface to talk to the local Session pallet. + type SessionInterface: SessionInterface; + + /// A fallback implementation to delegate logic to when the pallet is in + /// [`OperatingMode::Passive`]. + /// + /// This type must implement the `historical::SessionManager` and `OnOffenceHandler` + /// interface and is expected to behave as a stand-in for this pallet’s core logic when + /// delegation is active. + type Fallback: pallet_session::SessionManager + + OnOffenceHandler< + Self::AccountId, + (Self::AccountId, sp_staking::Exposure>), + Weight, + > + frame_support::traits::RewardsReporter + + pallet_authorship::EventHandler>; + } + + #[pallet::pallet] + #[pallet::storage_version(STORAGE_VERSION)] + pub struct Pallet(_); + + /// The queued validator sets for a given planning session index. + /// + /// This is received via a call from AssetHub. + #[pallet::storage] + #[pallet::unbounded] + pub type ValidatorSet = StorageValue<_, (u32, Vec), OptionQuery>; + + /// An incomplete validator set report. + #[pallet::storage] + #[pallet::unbounded] + pub type IncompleteValidatorSetReport = + StorageValue<_, rc_client::ValidatorSetReport, OptionQuery>; + + /// All of the points of the validators. + /// + /// This is populated during a session, and is flushed and sent over via [`SendToAssetHub`] + /// at each session end. + #[pallet::storage] + pub type ValidatorPoints = + StorageMap<_, Twox64Concat, T::AccountId, u32, ValueQuery>; + + /// Indicates the current operating mode of the pallet. + /// + /// This value determines how the pallet behaves in response to incoming and outgoing messages, + /// particularly whether it should execute logic directly, defer it, or delegate it entirely. 
+ #[pallet::storage] + pub type Mode = StorageValue<_, OperatingMode, ValueQuery>; + + /// A storage value that is set when a `new_session` gives a new validator set to the session + /// pallet, and is cleared on the next call. + /// + /// The inner u32 is the id of the said activated validator set. While not relevant here, good + /// to know this is the planning era index of staking-async on AH. + /// + /// Once cleared, we know a validator set has been activated, and therefore we can send a + /// timestamp to AH. + #[pallet::storage] + pub type NextSessionChangesValidators = StorageValue<_, u32, OptionQuery>; + + /// The session index at which the latest elected validator set was applied. + /// + /// This is used to determine if an offence, given a session index, is in the current active era + /// or not. + #[pallet::storage] + pub type ValidatorSetAppliedAt = StorageValue<_, SessionIndex, OptionQuery>; + + /// Stores offences that have been received while the pallet is in [`OperatingMode::Buffered`] + /// mode. + /// + /// These offences are collected and buffered for later processing when the pallet transitions + /// to [`OperatingMode::Active`]. This allows the system to defer slashing or reporting logic + /// until communication with the counterpart pallet on AssetHub is fully established. + /// + /// This storage is only used in `Buffered` mode; in `Active` mode, offences are immediately + /// sent, and in `Passive` mode, they are delegated to the [`Config::Fallback`] implementation. + #[pallet::storage] + #[pallet::unbounded] + pub type BufferedOffences = + StorageValue<_, Vec<(SessionIndex, Vec>)>, ValueQuery>; + + #[pallet::genesis_config] + #[derive(frame_support::DefaultNoBound, frame_support::DebugNoBound)] + pub struct GenesisConfig { + /// The initial operating mode of the pallet. 
+ pub operating_mode: OperatingMode, + pub _marker: core::marker::PhantomData, + } + + #[pallet::genesis_build] + impl BuildGenesisConfig for GenesisConfig { + fn build(&self) { + // Set the initial operating mode of the pallet. + Mode::::put(self.operating_mode.clone()); + } + } + + #[pallet::error] + pub enum Error { + /// Could not process incoming message because incoming messages are blocked. + Blocked, + } + + #[pallet::event] + #[pallet::generate_deposit(fn deposit_event)] + pub enum Event { + /// A new validator set has been received. + ValidatorSetReceived { + id: u32, + new_validator_set_count: u32, + prune_up_to: Option, + leftover: bool, + }, + /// We could not merge, and therefore dropped a buffered message. + /// + /// Note that this event is more resembling an error, but we use an event because in this + /// pallet we need to mutate storage upon some failures. + CouldNotMergeAndDropped, + /// The validator set received is way too small, as per + /// [`Config::MinimumValidatorSetSize`]. + SetTooSmallAndDropped, + /// Something occurred that should never happen under normal operation. Logged as an event + /// for fail-safe observability. + Unexpected(UnexpectedKind), + } + + /// Represents unexpected or invariant-breaking conditions encountered during execution. + /// + /// These variants are emitted as [`Event::Unexpected`] and indicate a defensive check has + /// failed. While these should never occur under normal operation, they are useful for + /// diagnosing issues in production or test environments. + #[derive(Clone, Encode, Decode, DecodeWithMemTracking, PartialEq, TypeInfo, RuntimeDebug)] + pub enum UnexpectedKind { + /// A validator set was received while the pallet is in [`OperatingMode::Passive`]. + ReceivedValidatorSetWhilePassive, + + /// An unexpected transition was applied between operating modes. + /// + /// Expected transitions are linear and forward-only: `Passive` → `Buffered` → `Active`. 
+ UnexpectedModeTransition, + } + + #[pallet::call] + impl Pallet { + #[pallet::call_index(0)] + #[pallet::weight( + // Reads: + // - OperatingMode + // - IncompleteValidatorSetReport + // Writes: + // - IncompleteValidatorSetReport or ValidatorSet + // ignoring `T::SessionInterface::prune_up_to` + T::DbWeight::get().reads_writes(2, 1) + )] + pub fn validator_set( + origin: OriginFor, + report: rc_client::ValidatorSetReport, + ) -> DispatchResult { + // Ensure the origin is one of Root or whatever is representing AssetHub. + log!(info, "Received new validator set report {:?}", report); + T::AssetHubOrigin::ensure_origin_or_root(origin)?; + + // Check the operating mode. + let mode = Mode::::get(); + ensure!(mode.can_accept_validator_set(), Error::::Blocked); + + let maybe_merged_report = match IncompleteValidatorSetReport::::take() { + Some(old) => old.merge(report.clone()), + None => Ok(report), + }; + + if maybe_merged_report.is_err() { + Self::deposit_event(Event::CouldNotMergeAndDropped); + debug_assert!( + IncompleteValidatorSetReport::::get().is_none(), + "we have ::take() it above, we don't want to keep the old data" + ); + return Ok(()); + } + + let report = maybe_merged_report.expect("checked above; qed"); + + if report.leftover { + // buffer it, and nothing further to do. + Self::deposit_event(Event::ValidatorSetReceived { + id: report.id, + new_validator_set_count: report.new_validator_set.len() as u32, + prune_up_to: report.prune_up_to, + leftover: report.leftover, + }); + IncompleteValidatorSetReport::::put(report); + } else { + // message is complete, process it. + let rc_client::ValidatorSetReport { + id, + leftover, + mut new_validator_set, + prune_up_to, + } = report; + + // ensure the validator set, deduplicated, is not too big. 
+ new_validator_set.sort(); + new_validator_set.dedup(); + + if (new_validator_set.len() as u32) < T::MinimumValidatorSetSize::get() { + Self::deposit_event(Event::SetTooSmallAndDropped); + debug_assert!( + IncompleteValidatorSetReport::::get().is_none(), + "we have ::take() it above, we don't want to keep the old data" + ); + return Ok(()); + } + + Self::deposit_event(Event::ValidatorSetReceived { + id, + new_validator_set_count: new_validator_set.len() as u32, + prune_up_to, + leftover, + }); + + // Save the validator set. + ValidatorSet::::put((id, new_validator_set)); + if let Some(index) = prune_up_to { + T::SessionInterface::prune_up_to(index); + } + } + + Ok(()) + } + + /// Allows governance to force set the operating mode of the pallet. + #[pallet::call_index(1)] + #[pallet::weight(T::DbWeight::get().writes(1))] + pub fn set_mode(origin: OriginFor, mode: OperatingMode) -> DispatchResult { + T::AdminOrigin::ensure_origin(origin)?; + Self::do_set_mode(mode); + Ok(()) + } + } + + impl + historical::SessionManager>> + for Pallet + { + fn new_session( + new_index: sp_staking::SessionIndex, + ) -> Option< + Vec<( + ::AccountId, + sp_staking::Exposure>, + )>, + > { + >::new_session(new_index) + .map(|v| v.into_iter().map(|v| (v, sp_staking::Exposure::default())).collect()) + } + + // We don't implement `new_session_genesis` because we rely on the default implementation + // which calls `new_session` + + fn start_session(start_index: SessionIndex) { + >::start_session(start_index) + } + + fn end_session(end_index: SessionIndex) { + >::end_session(end_index) + } + } + + impl pallet_session::SessionManager for Pallet { + fn new_session(session_index: u32) -> Option> { + match Mode::::get() { + OperatingMode::Passive => T::Fallback::new_session(session_index), + // In `Buffered` mode, we drop the session report and do nothing. 
+ OperatingMode::Buffered => None, + OperatingMode::Active => Self::do_new_session(), + } + } + + fn start_session(session_index: u32) { + if Mode::::get() == OperatingMode::Passive { + T::Fallback::start_session(session_index) + } + } + + fn end_session(session_index: u32) { + match Mode::::get() { + OperatingMode::Passive => T::Fallback::end_session(session_index), + // In `Buffered` mode, we drop the session report and do nothing. + OperatingMode::Buffered => (), + OperatingMode::Active => Self::do_end_session(session_index), + } + } + } + + impl + OnOffenceHandler< + T::AccountId, + (T::AccountId, sp_staking::Exposure>), + Weight, + > for Pallet + { + fn on_offence( + offenders: &[OffenceDetails< + T::AccountId, + (T::AccountId, sp_staking::Exposure>), + >], + slash_fraction: &[Perbill], + slash_session: SessionIndex, + ) -> Weight { + let mode = Mode::::get(); + if mode == OperatingMode::Passive { + // delegate to the fallback implementation. + return T::Fallback::on_offence(offenders, slash_fraction, slash_session); + } + + // check if offence is from the active validator set. + let ongoing_offence = ValidatorSetAppliedAt::::get() + .map(|start_session| slash_session >= start_session) + .unwrap_or(false); + + let mut offenders_and_slashes = Vec::new(); + + // notify pallet-session about the offences + for (offence, fraction) in offenders.iter().cloned().zip(slash_fraction) { + if ongoing_offence { + // report the offence to the session pallet. + T::SessionInterface::report_offence( + offence.offender.0.clone(), + OffenceSeverity(*fraction), + ); + } + + // prepare an `Offence` instance for the XCM message. Note that we drop the + // identification. 
+ let (offender, _full_identification) = offence.offender; + let reporters = offence.reporters; + offenders_and_slashes.push(rc_client::Offence { + offender, + reporters, + slash_fraction: *fraction, + }); + } + + match mode { + OperatingMode::Buffered => { + BufferedOffences::::mutate(|buffered| { + buffered.push((slash_session, offenders_and_slashes.clone())); + }); + log!(info, "Buffered offences: {:?}", offenders_and_slashes); + }, + OperatingMode::Active => { + log!(info, "sending offence report to AH"); + T::SendToAssetHub::relay_new_offence(slash_session, offenders_and_slashes); + }, + _ => (), + } + + Weight::zero() + } + } + + impl RewardsReporter for Pallet { + fn reward_by_ids(rewards: impl IntoIterator) { + match Mode::::get() { + OperatingMode::Passive => T::Fallback::reward_by_ids(rewards), + OperatingMode::Buffered | OperatingMode::Active => Self::do_reward_by_ids(rewards), + } + } + } + + impl pallet_authorship::EventHandler> for Pallet { + fn note_author(author: T::AccountId) { + match Mode::::get() { + OperatingMode::Passive => T::Fallback::note_author(author), + OperatingMode::Buffered | OperatingMode::Active => Self::do_note_author(author), + } + } + } + + impl Pallet { + /// Hook to be called when the AssetHub migration begins. + /// + /// This transitions the pallet into [`OperatingMode::Buffered`], meaning it will act as the + /// primary staking module on the relay chain but will buffer outgoing messages instead of + /// sending them to AssetHub. + /// + /// While in this mode, the pallet stops delegating to the fallback implementation and + /// temporarily accumulates events for later processing. + pub fn on_migration_start() { + debug_assert!( + Mode::::get() == OperatingMode::Passive, + "we should only be called when in passive mode" + ); + Self::do_set_mode(OperatingMode::Buffered); + } + + /// Hook to be called when the AssetHub migration is complete. 
+ /// + /// This transitions the pallet into [`OperatingMode::Active`], meaning the counterpart + /// pallet on AssetHub is ready to accept incoming messages, and this pallet can resume + /// sending them. + /// + /// In this mode, the pallet becomes fully active and processes all staking-related events + /// directly. + pub fn on_migration_end() { + debug_assert!( + Mode::::get() == OperatingMode::Buffered, + "we should only be called when in buffered mode" + ); + Self::do_set_mode(OperatingMode::Active); + + // send all buffered offences to AssetHub. + BufferedOffences::::take().into_iter().for_each(|(slash_session, offences)| { + T::SendToAssetHub::relay_new_offence(slash_session, offences) + }); + } + + fn do_set_mode(new_mode: OperatingMode) { + let old_mode = Mode::::get(); + let unexpected = match new_mode { + // `Passive` is the initial state, and not expected to be set by the user. + OperatingMode::Passive => true, + OperatingMode::Buffered => old_mode != OperatingMode::Passive, + OperatingMode::Active => old_mode != OperatingMode::Buffered, + }; + + // this is a defensive check, and should never happen under normal operation. + if unexpected { + log!(warn, "Unexpected mode transition from {:?} to {:?}", old_mode, new_mode); + Self::deposit_event(Event::Unexpected(UnexpectedKind::UnexpectedModeTransition)); + } + + // apply new mode anyway. + Mode::::put(new_mode); + } + + fn do_new_session() -> Option> { + ValidatorSet::::take().map(|(id, val_set)| { + // store the id to be sent back in the next session back to AH + NextSessionChangesValidators::::put(id); + val_set + }) + } + + fn do_end_session(session_index: u32) { + use sp_runtime::SaturatedConversion; + + let validator_points = ValidatorPoints::::iter().drain().collect::>(); + let activation_timestamp = NextSessionChangesValidators::::take().map(|id| { + // keep track of starting session index at which the validator set was applied. 
+ ValidatorSetAppliedAt::::put(session_index + 1); + // set the timestamp and the identifier of the validator set. + (T::UnixTime::now().as_millis().saturated_into::(), id) + }); + + let session_report = pallet_staking_async_rc_client::SessionReport { + end_index: session_index, + validator_points, + activation_timestamp, + leftover: false, + }; + + T::SendToAssetHub::relay_session_report(session_report); + } + + fn do_reward_by_ids(rewards: impl IntoIterator) { + for (validator_id, points) in rewards { + ValidatorPoints::::mutate(validator_id, |balance| { + balance.saturating_accrue(points); + }); + } + } + + fn do_note_author(author: T::AccountId) { + ValidatorPoints::::mutate(author, |points| { + points.saturating_accrue(T::PointsPerBlock::get()); + }); + } + } +} diff --git a/substrate/frame/staking-async/ahm-test/Cargo.toml b/substrate/frame/staking-async/ahm-test/Cargo.toml new file mode 100644 index 0000000000000..f17713b6e16e2 --- /dev/null +++ b/substrate/frame/staking-async/ahm-test/Cargo.toml @@ -0,0 +1,68 @@ +[package] +name = "pallet-ahm-test" +version = "1.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true +description = "e2e unit tests for staking in AHM" +publish = false + +[lints] +workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dev-dependencies] +codec = { features = ["derive"], workspace = true, default-features = true } +frame = { package = "polkadot-sdk-frame", workspace = true, default-features = true } +frame-support = { workspace = true, default-features = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-session = { workspace = true, default-features = true } +sp-staking = { workspace = true, default-features = true } +sp-tracing = { workspace = true, default-features = true } + +# 
pallets we need in both +pallet-balances = { workspace = true, default-features = true } + +# pallets that we need in AH +frame-election-provider-support = { workspace = true, default-features = true } +pallet-election-provider-multi-block = { workspace = true, default-features = true } +pallet-staking-async = { workspace = true, default-features = true } +pallet-staking-async-rc-client = { workspace = true, default-features = true } + +# pallets we need in the RC +pallet-authorship = { workspace = true, default-features = true } +pallet-session = { workspace = true, default-features = true } +pallet-staking-async-ah-client = { workspace = true, default-features = true } +pallet-timestamp = { workspace = true, default-features = true } +# staking classic which will be replaced by ah-client +pallet-root-offences = { workspace = true, default-features = true } +pallet-staking = { workspace = true, default-features = true } + +[features] +std = [ + "log/std", +] +try-runtime = [ + "pallet-balances/try-runtime", + + "pallet-staking/try-runtime", + + "pallet-staking-async-rc-client/try-runtime", + "pallet-staking-async/try-runtime", + + "frame-election-provider-support/try-runtime", + "frame-support/try-runtime", + "frame/try-runtime", + "pallet-authorship/try-runtime", + "pallet-election-provider-multi-block/try-runtime", + "pallet-root-offences/try-runtime", + "pallet-session/try-runtime", + "pallet-staking-async-ah-client/try-runtime", + "pallet-timestamp/try-runtime", +] diff --git a/substrate/frame/staking-async/ahm-test/src/ah/mock.rs b/substrate/frame/staking-async/ahm-test/src/ah/mock.rs new file mode 100644 index 0000000000000..a255f6105c284 --- /dev/null +++ b/substrate/frame/staking-async/ahm-test/src/ah/mock.rs @@ -0,0 +1,548 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::shared; +use frame::testing_prelude::*; +use frame_election_provider_support::{ + bounds::{ElectionBounds, ElectionBoundsBuilder}, + SequentialPhragmen, +}; +use frame_support::sp_runtime::testing::TestXt; +use pallet_election_provider_multi_block as multi_block; +use pallet_staking_async::Forcing; +use pallet_staking_async_rc_client::{SessionReport, ValidatorSetReport}; +use sp_staking::SessionIndex; + +construct_runtime! { + pub enum Runtime { + System: frame_system, + Balances: pallet_balances, + + Staking: pallet_staking_async, + RcClient: pallet_staking_async_rc_client, + + MultiBlock: multi_block, + MultiBlockVerifier: multi_block::verifier, + MultiBlockSigned: multi_block::signed, + MultiBlockUnsigned: multi_block::unsigned, + } +} + +// alias Runtime with T. 
+pub type T = Runtime; + +pub fn roll_next() { + let now = System::block_number(); + let next = now + 1; + + System::set_block_number(next); + + Staking::on_initialize(next); + RcClient::on_initialize(next); + MultiBlock::on_initialize(next); + MultiBlockVerifier::on_initialize(next); + MultiBlockSigned::on_initialize(next); + MultiBlockUnsigned::on_initialize(next); +} + +pub fn roll_many(blocks: BlockNumber) { + let current = System::block_number(); + while System::block_number() < current + blocks { + roll_next(); + } +} + +pub fn roll_until_matches(criteria: impl Fn() -> bool, with_rc: bool) { + while !criteria() { + roll_next(); + if with_rc { + if LocalQueue::get().is_some() { + panic!("when local queue is set, you cannot roll ah forward as well!") + } + shared::in_rc(|| { + crate::rc::roll_next(); + }); + } + } +} + +/// Use the given `end_index` as the first session report, and increment as per needed. +pub(crate) fn roll_until_next_active(mut end_index: SessionIndex) -> Vec { + // receive enough session reports, such that we plan a new era + let planned_era = pallet_staking_async::session_rotation::Rotator::::planning_era(); + let active_era = pallet_staking_async::session_rotation::Rotator::::active_era(); + + while pallet_staking_async::session_rotation::Rotator::::planning_era() == planned_era + { + let report = SessionReport { + end_index, + activation_timestamp: None, + leftover: false, + validator_points: Default::default(), + }; + assert_ok!(pallet_staking_async_rc_client::Pallet::::relay_session_report( + RuntimeOrigin::root(), + report + )); + roll_next(); + end_index += 1; + } + + // now we have planned a new session. 
Roll until we have an outgoing message ready, meaning the + // election is done + LocalQueue::flush(); + loop { + let messages = LocalQueue::get_since_last_call(); + match messages.len() { + 0 => { + roll_next(); + continue; + }, + 1 => { + assert_eq!( + messages[0], + ( + System::block_number(), + OutgoingMessages::ValidatorSet(ValidatorSetReport { + id: planned_era + 1, + leftover: false, + // arbitrary, feel free to change if test setup updates + new_validator_set: vec![3, 5, 6, 8], + prune_up_to: None, + }) + ) + ); + break + }, + _ => panic!("Expected only one message in local queue, but got: {:?}", messages), + } + } + + // active era is still 0 + assert_eq!( + pallet_staking_async::session_rotation::Rotator::::active_era(), + active_era + ); + + // rc will not tell us that it has instantly activated a validator set. + let report = SessionReport { + end_index, + activation_timestamp: Some((1000, planned_era + 1)), + leftover: false, + validator_points: Default::default(), + }; + assert_ok!(pallet_staking_async_rc_client::Pallet::::relay_session_report( + RuntimeOrigin::root(), + report + )); + + // active era is now 1. 
+ assert_eq!( + pallet_staking_async::session_rotation::Rotator::::active_era(), + active_era + 1 + ); + + // arbitrary, feel free to change if test setup updates + vec![3, 5, 6, 8] +} + +pub type AccountId = ::AccountId; +pub type Balance = ::Balance; +pub type Hash = ::Hash; +pub type BlockNumber = BlockNumberFor; + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] +impl frame_system::Config for Runtime { + type Block = MockBlock; + type AccountData = pallet_balances::AccountData; +} + +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] +impl pallet_balances::Config for Runtime { + type Balance = u128; + type AccountStore = System; +} + +frame_election_provider_support::generate_solution_type!( + pub struct TestNposSolution::< + VoterIndex = u16, + TargetIndex = u16, + Accuracy = PerU16, + MaxVoters = ConstU32::<1000> + >(16) +); + +type Extrinsic = TestXt; +impl frame_system::offchain::CreateTransactionBase for Runtime +where + RuntimeCall: From, +{ + type RuntimeCall = RuntimeCall; + type Extrinsic = Extrinsic; +} + +impl frame_system::offchain::CreateInherent for Runtime +where + RuntimeCall: From, +{ + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + Extrinsic::new_bare(call) + } +} + +type MaxVotesPerVoter = pallet_staking_async::MaxNominationsOf; +parameter_types! 
{ + pub static MaxValidators: u32 = 32; + pub static MaxBackersPerWinner: u32 = 16; + pub static MaxExposurePageSize: u32 = 8; + pub static MaxBackersPerWinnerFinal: u32 = 16; + pub static MaxWinnersPerPage: u32 = 16; + pub static MaxLength: u32 = 4 * 1024 * 1024; + pub static Pages: u32 = 3; + pub static TargetSnapshotPerBlock: u32 = 4; + pub static VoterSnapshotPerBlock: u32 = 4; + + pub static SignedPhase: BlockNumber = 4; + pub static UnsignedPhase: BlockNumber = 4; + pub static SignedValidationPhase: BlockNumber = (2 * Pages::get() as BlockNumber); +} + +impl multi_block::unsigned::miner::MinerConfig for Runtime { + type AccountId = AccountId; + type Hash = Hash; + type MaxBackersPerWinner = MaxBackersPerWinner; + type MaxWinnersPerPage = MaxWinnersPerPage; + type MaxBackersPerWinnerFinal = MaxBackersPerWinnerFinal; + type MaxVotesPerVoter = MaxVotesPerVoter; + type Solution = TestNposSolution; + type MaxLength = MaxLength; + type Pages = Pages; + type Solver = SequentialPhragmen; + type TargetSnapshotPerBlock = TargetSnapshotPerBlock; + type VoterSnapshotPerBlock = VoterSnapshotPerBlock; +} + +parameter_types! { + pub Bounds: ElectionBounds = ElectionBoundsBuilder::default().build(); +} + +pub struct OnChainConfig; +impl frame_election_provider_support::onchain::Config for OnChainConfig { + // unbounded + type Bounds = Bounds; + // We should not need sorting, as our bounds are large enough for the number of + // nominators/validators in this test setup. 
+ type Sort = ConstBool; + type DataProvider = Staking; + type MaxBackersPerWinner = MaxBackersPerWinner; + type MaxWinnersPerPage = MaxWinnersPerPage; + type Solver = SequentialPhragmen; + type System = Runtime; + type WeightInfo = (); +} + +impl multi_block::Config for Runtime { + type AdminOrigin = EnsureRoot; + type DataProvider = Staking; + type Fallback = frame_election_provider_support::onchain::OnChainExecution; + type MinerConfig = Self; + + type Pages = Pages; + type SignedPhase = SignedPhase; + type UnsignedPhase = UnsignedPhase; + type SignedValidationPhase = SignedValidationPhase; + type TargetSnapshotPerBlock = TargetSnapshotPerBlock; + type VoterSnapshotPerBlock = VoterSnapshotPerBlock; + type Verifier = MultiBlockVerifier; + type AreWeDone = multi_block::ProceedRegardlessOf; + type WeightInfo = multi_block::weights::AllZeroWeights; +} + +impl multi_block::verifier::Config for Runtime { + type MaxBackersPerWinner = MaxBackersPerWinner; + type MaxBackersPerWinnerFinal = MaxBackersPerWinnerFinal; + type MaxWinnersPerPage = MaxWinnersPerPage; + + type SolutionDataProvider = MultiBlockSigned; + type SolutionImprovementThreshold = (); + type WeightInfo = multi_block::weights::AllZeroWeights; +} + +impl multi_block::unsigned::Config for Runtime { + type MinerPages = ConstU32<1>; + type WeightInfo = multi_block::weights::AllZeroWeights; + type MinerTxPriority = ConstU64<{ u64::MAX }>; + type OffchainRepeat = (); + type OffchainSolver = SequentialPhragmen; +} + +parameter_types! 
{ + pub static DepositBase: Balance = 1; + pub static DepositPerPage: Balance = 1; + pub static MaxSubmissions: u32 = 2; + pub static RewardBase: Balance = 5; +} + +impl multi_block::signed::Config for Runtime { + type RuntimeHoldReason = RuntimeHoldReason; + + type Currency = Balances; + + type EjectGraceRatio = (); + type BailoutGraceRatio = (); + type DepositBase = DepositBase; + type DepositPerPage = DepositPerPage; + type EstimateCallFee = ConstU32<1>; + type MaxSubmissions = MaxSubmissions; + type RewardBase = RewardBase; + type WeightInfo = multi_block::weights::AllZeroWeights; +} + +parameter_types! { + pub static BondingDuration: u32 = 3; + pub static SlashDeferredDuration: u32 = 2; + pub static SessionsPerEra: u32 = 6; + pub static PlanningEraOffset: u32 = 1; +} + +impl pallet_staking_async::Config for Runtime { + type Filter = (); + type RuntimeHoldReason = RuntimeHoldReason; + + type AdminOrigin = EnsureRoot; + type BondingDuration = BondingDuration; + type SessionsPerEra = SessionsPerEra; + type PlanningEraOffset = PlanningEraOffset; + + type Currency = Balances; + type OldCurrency = Balances; + type CurrencyBalance = Balance; + type CurrencyToVote = (); + + type ElectionProvider = MultiBlock; + + type EraPayout = (); + type EventListeners = (); + type Reward = (); + type RewardRemainder = (); + type Slash = (); + type SlashDeferDuration = SlashDeferredDuration; + + type HistoryDepth = ConstU32<7>; + type MaxControllersInDeprecationBatch = (); + + type MaxDisabledValidators = MaxValidators; + type MaxValidatorSet = MaxValidators; + type MaxExposurePageSize = MaxExposurePageSize; + type MaxInvulnerables = MaxValidators; + type MaxUnlockingChunks = ConstU32<16>; + type NominationsQuota = pallet_staking_async::FixedNominationsQuota<16>; + + type VoterList = pallet_staking_async::UseNominatorsAndValidatorsMap; + type TargetList = pallet_staking_async::UseValidatorsMap; + + type RcClientInterface = RcClient; + + type WeightInfo = (); +} + +impl 
pallet_staking_async_rc_client::Config for Runtime { + type AHStakingInterface = Staking; + type SendToRelayChain = DeliverToRelay; + type RelayChainOrigin = EnsureRoot; +} + +pub struct DeliverToRelay; +impl pallet_staking_async_rc_client::SendToRelayChain for DeliverToRelay { + type AccountId = AccountId; + + fn validator_set(report: pallet_staking_async_rc_client::ValidatorSetReport) { + if let Some(mut local_queue) = LocalQueue::get() { + local_queue.push((System::block_number(), OutgoingMessages::ValidatorSet(report))); + LocalQueue::set(Some(local_queue)); + } else { + shared::CounterAHRCValidatorSet::mutate(|x| *x += 1); + shared::in_rc(|| { + let origin = crate::rc::RuntimeOrigin::root(); + pallet_staking_async_ah_client::Pallet::::validator_set( + origin, + report.clone(), + ) + .unwrap(); + }); + } + } +} + +const INITIAL_BALANCE: Balance = 1000; +const INITIAL_STAKE: Balance = 100; + +#[derive(Clone, Debug, PartialEq)] +pub enum OutgoingMessages { + ValidatorSet(pallet_staking_async_rc_client::ValidatorSetReport), +} + +parameter_types! { + pub static LocalQueue: Option> = None; + pub static LocalQueueLastIndex: usize = 0; +} + +impl LocalQueue { + pub fn get_since_last_call() -> Vec<(BlockNumber, OutgoingMessages)> { + if let Some(all) = Self::get() { + let last = LocalQueueLastIndex::get(); + LocalQueueLastIndex::set(all.len()); + all.into_iter().skip(last).collect() + } else { + panic!("Must set local_queue()!") + } + } + + pub fn flush() { + let _ = Self::get_since_last_call(); + } +} + +pub struct ExtBuilder { + // if true, emulate pre-ahm-migration state + pre_migration: bool, +} + +impl Default for ExtBuilder { + fn default() -> Self { + Self { pre_migration: false } + } +} + +impl ExtBuilder { + /// Set this if you want to emulate pre-migration state of staking-async. + pub fn pre_migration(self) -> Self { + Self { pre_migration: true } + } + + /// Set this if you want to test the ah-runtime locally. 
This will push outgoing messages to + /// `LocalQueue` instead of enacting them on RC. + pub fn local_queue(self) -> Self { + LocalQueue::set(Some(Default::default())); + self + } + + pub fn slash_defer_duration(self, duration: u32) -> Self { + SlashDeferredDuration::set(duration); + self + } + + pub fn build(self) -> TestState { + let _ = sp_tracing::try_init_simple(); + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + + // Note: The state in pallet-staking-async is retained even when pre-migration is set. + // This does not impact the tests, but for strict accuracy, be aware that the state isn't + // fully representative. + let validators = vec![1, 2, 3, 4, 5, 6, 7, 8] + .into_iter() + .map(|x| (x, INITIAL_STAKE, pallet_staking_async::StakerStatus::Validator)); + + let nominators = vec![ + (100, vec![1, 2]), + (101, vec![2, 5]), + (102, vec![1, 1]), + (103, vec![3, 3]), + (104, vec![1, 5]), + (105, vec![5, 4]), + (106, vec![6, 2]), + (107, vec![1, 6]), + (108, vec![2, 7]), + (109, vec![4, 8]), + (110, vec![5, 2]), + (111, vec![6, 6]), + (112, vec![8, 1]), + ] + .into_iter() + .map(|(x, y)| (x, INITIAL_STAKE, pallet_staking_async::StakerStatus::Nominator(y))); + + let stakers = validators.chain(nominators).collect::>(); + let balances = stakers + .clone() + .into_iter() + .map(|(x, _, _)| (x, INITIAL_BALANCE)) + .collect::>(); + + pallet_balances::GenesisConfig:: { balances, ..Default::default() } + .assimilate_storage(&mut t) + .unwrap(); + + pallet_staking_async::GenesisConfig:: { + stakers, + validator_count: 4, + active_era: (0, 0, 0), + force_era: if self.pre_migration { Forcing::ForceNone } else { Forcing::default() }, + ..Default::default() + } + .assimilate_storage(&mut t) + .unwrap(); + + let mut state: TestState = t.into(); + + state.execute_with(|| { + // initialises events + roll_next(); + }); + + state + } +} + +parameter_types! 
{ + static StakingEventsIndex: usize = 0; + static ElectionEventsIndex: usize = 0; + static RcClientEventsIndex: usize = 0; +} + +pub(crate) fn rc_client_events_since_last_call() -> Vec> { + let all: Vec<_> = System::events() + .into_iter() + .filter_map( + |r| if let RuntimeEvent::RcClient(inner) = r.event { Some(inner) } else { None }, + ) + .collect(); + let seen = RcClientEventsIndex::get(); + RcClientEventsIndex::set(all.len()); + all.into_iter().skip(seen).collect() +} + +pub(crate) fn staking_events_since_last_call() -> Vec> { + let all: Vec<_> = System::events() + .into_iter() + .filter_map(|r| if let RuntimeEvent::Staking(inner) = r.event { Some(inner) } else { None }) + .collect(); + let seen = StakingEventsIndex::get(); + StakingEventsIndex::set(all.len()); + all.into_iter().skip(seen).collect() +} + +pub(crate) fn election_events_since_last_call() -> Vec> { + let all: Vec<_> = System::events() + .into_iter() + .filter_map( + |r| if let RuntimeEvent::MultiBlock(inner) = r.event { Some(inner) } else { None }, + ) + .collect(); + let seen = ElectionEventsIndex::get(); + ElectionEventsIndex::set(all.len()); + all.into_iter().skip(seen).collect() +} diff --git a/substrate/frame/staking-async/ahm-test/src/ah/mod.rs b/substrate/frame/staking-async/ahm-test/src/ah/mod.rs new file mode 100644 index 0000000000000..7a5aa43baf0df --- /dev/null +++ b/substrate/frame/staking-async/ahm-test/src/ah/mod.rs @@ -0,0 +1,22 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod mock; +pub mod test; + +// re-export for easier use in dual runtime tests. +pub use mock::*; diff --git a/substrate/frame/staking-async/ahm-test/src/ah/test.rs b/substrate/frame/staking-async/ahm-test/src/ah/test.rs new file mode 100644 index 0000000000000..da35b48d1b0d5 --- /dev/null +++ b/substrate/frame/staking-async/ahm-test/src/ah/test.rs @@ -0,0 +1,699 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::ah::mock::*; +use frame::prelude::Perbill; +use frame_support::{assert_noop, assert_ok}; +use pallet_election_provider_multi_block::{Event as ElectionEvent, Phase}; +use pallet_staking_async::{ + session_rotation::Rotator, ActiveEra, ActiveEraInfo, CurrentEra, Event as StakingEvent, +}; +use pallet_staking_async_rc_client as rc_client; +use pallet_staking_async_rc_client::ValidatorSetReport; + +// Tests that are specific to Asset Hub. 
+#[test] +fn on_receive_session_report() { + ExtBuilder::default().local_queue().build().execute_with(|| { + // GIVEN genesis state of ah + assert_eq!(System::block_number(), 1); + assert_eq!(CurrentEra::::get(), Some(0)); + assert_eq!(pallet_staking_async::ErasStartSessionIndex::::get(0), Some(0)); + assert_eq!(ActiveEra::::get(), Some(ActiveEraInfo { index: 0, start: Some(0) })); + + // WHEN session ends on RC and session report is received by AH. + let session_report = rc_client::SessionReport { + end_index: 0, + validator_points: (1..9).into_iter().map(|v| (v as AccountId, v * 10)).collect(), + activation_timestamp: None, + leftover: false, + }; + + assert_ok!(rc_client::Pallet::::relay_session_report( + RuntimeOrigin::root(), + session_report.clone(), + )); + + // THEN end 0, start 1, plan 2 + let era_points = pallet_staking_async::ErasRewardPoints::::get(&0); + assert_eq!(era_points.total, 360); + assert_eq!(era_points.individual.get(&1), Some(&10)); + assert_eq!(era_points.individual.get(&4), Some(&40)); + assert_eq!(era_points.individual.get(&7), Some(&70)); + assert_eq!(era_points.individual.get(&8), Some(&80)); + assert_eq!(era_points.individual.get(&9), None); + + // assert no era changed yet. + assert_eq!(CurrentEra::::get(), Some(0)); + assert_eq!(ActiveEra::::get(), Some(ActiveEraInfo { index: 0, start: Some(0) })); + + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::SessionRotated { + starting_session: 1, + active_era: 0, + planned_era: 0 + }] + ); + + assert_eq!(election_events_since_last_call(), vec![]); + + // roll two more sessions... + for i in 1..3 { + // roll some random number of blocks. + roll_many(10); + + // send the session report. 
+ assert_ok!(rc_client::Pallet::::relay_session_report( + RuntimeOrigin::root(), + rc_client::SessionReport { + end_index: i, + validator_points: vec![(1, 10)], + activation_timestamp: None, + leftover: false, + } + )); + + let era_points = pallet_staking_async::ErasRewardPoints::::get(&0); + assert_eq!(era_points.total, 360 + i * 10); + assert_eq!(era_points.individual.get(&1), Some(&(10 + i * 10))); + + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::SessionRotated { + starting_session: i + 1, + active_era: 0, + planned_era: 0 + }] + ); + } + + // Next session we will begin election. + assert_ok!(rc_client::Pallet::::relay_session_report( + RuntimeOrigin::root(), + rc_client::SessionReport { + end_index: 3, + validator_points: vec![(1, 10)], + activation_timestamp: None, + leftover: false, + } + )); + + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::SessionRotated { + starting_session: 4, + active_era: 0, + // planned era 1 indicates election start signal is sent. + planned_era: 1 + }] + ); + + assert_eq!( + election_events_since_last_call(), + // Snapshot phase has started which will run for 3 blocks + vec![ElectionEvent::PhaseTransitioned { from: Phase::Off, to: Phase::Snapshot(3) }] + ); + + // roll 3 blocks for signed phase, and one for the transition. + roll_many(3 + 1); + assert_eq!( + election_events_since_last_call(), + // Signed phase has started which will run for 3 blocks. + vec![ElectionEvent::PhaseTransitioned { + from: Phase::Snapshot(0), + to: Phase::Signed(3) + }] + ); + + // roll some blocks until election result is exported. 
+ roll_many(14); + assert_eq!( + election_events_since_last_call(), + vec![ + ElectionEvent::PhaseTransitioned { + from: Phase::Signed(0), + to: Phase::SignedValidation(5) + }, + ElectionEvent::PhaseTransitioned { + from: Phase::SignedValidation(0), + to: Phase::Unsigned(3) + }, + ElectionEvent::PhaseTransitioned { from: Phase::Unsigned(0), to: Phase::Done }, + ] + ); + + // no staking event while election ongoing. + assert_eq!(staking_events_since_last_call(), vec![]); + // no xcm message sent yet. + assert_eq!(LocalQueue::get().unwrap(), vec![]); + + // next 3 block exports the election result to staking. + roll_many(3); + + assert_eq!( + staking_events_since_last_call(), + vec![ + StakingEvent::PagedElectionProceeded { page: 2, result: Ok(4) }, + StakingEvent::PagedElectionProceeded { page: 1, result: Ok(0) }, + StakingEvent::PagedElectionProceeded { page: 0, result: Ok(0) } + ] + ); + + assert_eq!( + election_events_since_last_call(), + vec![ + ElectionEvent::PhaseTransitioned { from: Phase::Done, to: Phase::Export(2) }, + ElectionEvent::PhaseTransitioned { from: Phase::Export(0), to: Phase::Off } + ] + ); + + // New validator set xcm message is sent to RC. + assert_eq!( + LocalQueue::get().unwrap(), + vec![( + // this is the block number at which the message was sent. + 42, + OutgoingMessages::ValidatorSet(ValidatorSetReport { + new_validator_set: vec![3, 5, 6, 8], + id: 1, + prune_up_to: None, + leftover: false + }) + )] + ); + }) +} + +#[test] +fn roll_many_eras() { + // todo: + // - Ensure rewards can be claimed at correct era. + // - assert outgoing messages, including id and prune_up_to. 
+ ExtBuilder::default().local_queue().build().execute_with(|| { + let mut session_counter: u32 = 0; + + let mut roll_session = |activate: bool| { + let activation_timestamp = if activate { + let current_era = CurrentEra::::get().unwrap(); + Some((current_era as u64 * 1000, current_era as u32)) + } else { + None + }; + + assert_ok!(rc_client::Pallet::::relay_session_report( + RuntimeOrigin::root(), + rc_client::SessionReport { + end_index: session_counter, + validator_points: vec![(1, 10)], + activation_timestamp, + leftover: false, + } + )); + + // increment session for the next iteration. + session_counter += 1; + + // run session blocks. + roll_many(60); + }; + + for era in 0..50 { + // --- first 3 idle session + for _ in 0..3 { + roll_session(false); + assert_eq!(ActiveEra::::get().unwrap().index, era); + assert_eq!(CurrentEra::::get().unwrap(), era); + } + + // ensure validator set not sent yet to RC. + // queue size same as in last iteration. + assert_eq!(LocalQueue::get().unwrap().len() as u32, era); + + // --- plan era session + roll_session(false); + assert_eq!(ActiveEra::::get().unwrap().index, era); + assert_eq!(CurrentEra::::get().unwrap(), era + 1); + + // ensure new validator set sent to RC. + // length increases by 1. 
+ assert_eq!(LocalQueue::get().unwrap().len() as u32, era + 1); + + // --- 5th starting session, idle + roll_session(false); + assert_eq!(ActiveEra::::get().unwrap().index, era); + assert_eq!(CurrentEra::::get().unwrap(), era + 1); + + // --- 6th the era rotation session + roll_session(true); + assert_eq!(ActiveEra::::get().unwrap().index, era + 1); + assert_eq!(CurrentEra::::get().unwrap(), era + 1); + } + }); +} + +#[test] +fn receives_old_session_report() { + ExtBuilder::default().local_queue().build().execute_with(|| { + // Initial state + assert_eq!(CurrentEra::::get(), Some(0)); + assert_eq!(pallet_staking_async::ErasStartSessionIndex::::get(0), Some(0)); + assert_eq!(ActiveEra::::get(), Some(ActiveEraInfo { index: 0, start: Some(0) })); + assert_eq!(rc_client::LastSessionReportEndingIndex::::get(), None); + + // Receive report for end of 1, start of 1 and plan 2. + let session_report = rc_client::SessionReport { + end_index: 0, + validator_points: vec![(5, 50)], + activation_timestamp: None, + leftover: false, + }; + + assert_ok!(rc_client::Pallet::::relay_session_report( + RuntimeOrigin::root(), + session_report.clone(), + )); + + // then + assert_eq!(rc_client::LastSessionReportEndingIndex::::get(), Some(0)); + assert_eq!( + rc_client_events_since_last_call(), + vec![rc_client::Event::SessionReportReceived { + end_index: 0, + activation_timestamp: None, + validator_points_counts: 1, + leftover: false + }] + ); + assert_eq!( + staking_events_since_last_call(), + vec![pallet_staking_async::Event::SessionRotated { + starting_session: 1, + active_era: 0, + planned_era: 0 + }] + ); + + // reward points are added + assert_eq!(pallet_staking_async::ErasRewardPoints::::get(&0).total, 50); + + // this is ok, but no new session report is received in staking. 
+ assert_noop!( + rc_client::Pallet::::relay_session_report( + RuntimeOrigin::root(), + session_report.clone(), + ), + rc_client::Error::::SessionIndexNotValid + ); + }) +} + +#[test] +fn receives_session_report_in_future() { + ExtBuilder::default().local_queue().build().execute_with(|| { + // Initial state + assert_eq!(CurrentEra::::get(), Some(0)); + assert_eq!(pallet_staking_async::ErasStartSessionIndex::::get(0), Some(0)); + assert_eq!(ActiveEra::::get(), Some(ActiveEraInfo { index: 0, start: Some(0) })); + assert_eq!(rc_client::LastSessionReportEndingIndex::::get(), None); + + // Receive report for end of 1, start of 1 and plan 2. + + assert_ok!(rc_client::Pallet::::relay_session_report( + RuntimeOrigin::root(), + rc_client::SessionReport { + end_index: 0, + validator_points: vec![(5, 50)], + activation_timestamp: None, + leftover: false, + }, + )); + + // then + assert_eq!(rc_client::LastSessionReportEndingIndex::::get(), Some(0)); + assert_eq!( + rc_client_events_since_last_call(), + vec![rc_client::Event::SessionReportReceived { + end_index: 0, + activation_timestamp: None, + validator_points_counts: 1, + leftover: false + }] + ); + assert_eq!( + staking_events_since_last_call(), + vec![pallet_staking_async::Event::SessionRotated { + starting_session: 1, + active_era: 0, + planned_era: 0 + }] + ); + + // reward points are added + assert_eq!(pallet_staking_async::ErasRewardPoints::::get(&0).total, 50); + + // skip end_index 1 + assert_noop!( + rc_client::Pallet::::relay_session_report( + RuntimeOrigin::root(), + rc_client::SessionReport { + end_index: 2, + validator_points: vec![(5, 50)], + activation_timestamp: None, + leftover: false, + }, + ), + rc_client::Error::::SessionIndexNotValid + ); + }) +} + +#[test] +fn on_offence_current_era() { + ExtBuilder::default().local_queue().build().execute_with(|| { + let active_validators = roll_until_next_active(0); + assert_eq!(pallet_staking_async::ErasStartSessionIndex::::get(1), Some(5)); + 
assert_eq!(active_validators, vec![3, 5, 6, 8]); + + // flush the events. + let _ = staking_events_since_last_call(); + + assert_ok!(rc_client::Pallet::::relay_new_offence( + RuntimeOrigin::root(), + 5, + vec![ + rc_client::Offence { + offender: 5, + reporters: vec![], + slash_fraction: Perbill::from_percent(50), + }, + rc_client::Offence { + offender: 3, + reporters: vec![], + slash_fraction: Perbill::from_percent(50), + } + ] + )); + + assert_eq!( + staking_events_since_last_call(), + vec![ + pallet_staking_async::Event::OffenceReported { + offence_era: 1, + validator: 5, + fraction: Perbill::from_percent(50) + }, + pallet_staking_async::Event::OffenceReported { + offence_era: 1, + validator: 3, + fraction: Perbill::from_percent(50) + } + ] + ); + + // 2 blocks to process these offences, and they are deferred. + roll_next(); + assert_eq!( + staking_events_since_last_call(), + vec![pallet_staking_async::Event::SlashComputed { + offence_era: 1, + slash_era: 3, + offender: 5, + page: 0 + },] + ); + roll_next(); + assert_eq!( + staking_events_since_last_call(), + vec![pallet_staking_async::Event::SlashComputed { + offence_era: 1, + slash_era: 3, + offender: 3, + page: 0 + }] + ); + + // skip two eras + assert_eq!(SlashDeferredDuration::get(), 2); + roll_until_next_active(5); + roll_until_next_active(10); + let _ = staking_events_since_last_call(); + + // 2 blocks to apply the slashes + roll_next(); + assert_eq!( + staking_events_since_last_call(), + vec![pallet_staking_async::Event::Slashed { staker: 3, amount: 50 },] + ); + roll_next(); + assert_eq!( + staking_events_since_last_call(), + vec![ + pallet_staking_async::Event::Slashed { staker: 5, amount: 50 }, + pallet_staking_async::Event::Slashed { staker: 110, amount: 50 } + ] + ); + }); +} + +#[test] +fn on_offence_current_era_instant_apply() { + ExtBuilder::default() + .local_queue() + .slash_defer_duration(0) + .build() + .execute_with(|| { + let active_validators = roll_until_next_active(0); + 
assert_eq!(pallet_staking_async::ErasStartSessionIndex::::get(1), Some(5)); + assert_eq!(active_validators, vec![3, 5, 6, 8]); + + // flush the events. + let _ = staking_events_since_last_call(); + + assert_ok!(rc_client::Pallet::::relay_new_offence( + RuntimeOrigin::root(), + 5, + vec![ + rc_client::Offence { + offender: 5, + reporters: vec![], + slash_fraction: Perbill::from_percent(50), + }, + rc_client::Offence { + offender: 3, + reporters: vec![], + slash_fraction: Perbill::from_percent(50), + } + ] + )); + + assert_eq!( + staking_events_since_last_call(), + vec![ + pallet_staking_async::Event::OffenceReported { + offence_era: 1, + validator: 5, + fraction: Perbill::from_percent(50) + }, + pallet_staking_async::Event::OffenceReported { + offence_era: 1, + validator: 3, + fraction: Perbill::from_percent(50) + } + ] + ); + + // 2 blocks to process these offences, and they are applied on the spot. + roll_next(); + assert_eq!( + staking_events_since_last_call(), + vec![ + pallet_staking_async::Event::SlashComputed { + offence_era: 1, + slash_era: 1, + offender: 5, + page: 0 + }, + pallet_staking_async::Event::Slashed { staker: 5, amount: 50 }, + pallet_staking_async::Event::Slashed { staker: 110, amount: 50 } + ] + ); + roll_next(); + assert_eq!( + staking_events_since_last_call(), + vec![ + pallet_staking_async::Event::SlashComputed { + offence_era: 1, + slash_era: 1, + offender: 3, + page: 0 + }, + pallet_staking_async::Event::Slashed { staker: 3, amount: 50 } + ] + ); + }); +} + +#[test] +fn on_offence_non_validator() { + ExtBuilder::default() + .slash_defer_duration(0) + .local_queue() + .build() + .execute_with(|| { + let active_validators = roll_until_next_active(0); + assert_eq!(pallet_staking_async::ErasStartSessionIndex::::get(1), Some(5)); + assert_eq!(active_validators, vec![3, 5, 6, 8]); + + // flush the events. 
+ let _ = staking_events_since_last_call(); + + assert_ok!(rc_client::Pallet::::relay_new_offence( + RuntimeOrigin::root(), + 5, + vec![rc_client::Offence { + // this offender is unknown to the staking pallet. + offender: 666, + reporters: vec![], + slash_fraction: Perbill::from_percent(50), + }] + )); + + // nada + assert_eq!(staking_events_since_last_call(), vec![]); + }); +} + +#[test] +fn on_offence_previous_era() { + ExtBuilder::default().local_queue().build().execute_with(|| { + let _ = roll_until_next_active(0); + let _ = roll_until_next_active(5); + let active_validators = roll_until_next_active(10); + + assert_eq!(active_validators, vec![3, 5, 6, 8]); + assert_eq!(Rotator::::active_era(), 3); + + // flush the events. + let _ = staking_events_since_last_call(); + + // report an offence for the session belonging to the previous era + assert_eq!(pallet_staking_async::ErasStartSessionIndex::::get(1), Some(5)); + + assert_ok!(rc_client::Pallet::::relay_new_offence( + RuntimeOrigin::root(), + // offence is in era 1 + 5, + vec![rc_client::Offence { + offender: 3, + reporters: vec![], + slash_fraction: Perbill::from_percent(50), + }] + )); + + // reported + assert_eq!( + staking_events_since_last_call(), + vec![pallet_staking_async::Event::OffenceReported { + offence_era: 1, + validator: 3, + fraction: Perbill::from_percent(50) + }] + ); + + // computed, and instantly applied, as we are already on era 3 (slash era = 1, defer = 2) + roll_next(); + assert_eq!( + staking_events_since_last_call(), + vec![ + pallet_staking_async::Event::SlashComputed { + offence_era: 1, + slash_era: 3, + offender: 3, + page: 0 + }, + pallet_staking_async::Event::Slashed { staker: 3, amount: 50 } + ] + ); + + // nothing left + roll_next(); + assert_eq!(staking_events_since_last_call(), vec![]); + }); +} + +#[test] +fn on_offence_previous_era_instant_apply() { + ExtBuilder::default() + .slash_defer_duration(0) + .local_queue() + .build() + .execute_with(|| { + let _ = 
roll_until_next_active(0); + let _ = roll_until_next_active(5); + let active_validators = roll_until_next_active(10); + + assert_eq!(active_validators, vec![3, 5, 6, 8]); + assert_eq!(Rotator::::active_era(), 3); + + // flush the events. + let _ = staking_events_since_last_call(); + + // report an offence for the session belonging to the previous era + assert_eq!(pallet_staking_async::ErasStartSessionIndex::::get(1), Some(5)); + + assert_ok!(rc_client::Pallet::::relay_new_offence( + RuntimeOrigin::root(), + // offence is in era 1 + 5, + vec![rc_client::Offence { + offender: 3, + reporters: vec![], + slash_fraction: Perbill::from_percent(50), + }] + )); + + // reported + assert_eq!( + staking_events_since_last_call(), + vec![pallet_staking_async::Event::OffenceReported { + offence_era: 1, + validator: 3, + fraction: Perbill::from_percent(50) + }] + ); + + // applied right away + roll_next(); + assert_eq!( + staking_events_since_last_call(), + vec![ + pallet_staking_async::Event::SlashComputed { + offence_era: 1, + slash_era: 1, + offender: 3, + page: 0 + }, + pallet_staking_async::Event::Slashed { staker: 3, amount: 50 } + ] + ); + + // nothing left + roll_next(); + assert_eq!(staking_events_since_last_call(), vec![]); + }); +} diff --git a/substrate/frame/staking-async/ahm-test/src/lib.rs b/substrate/frame/staking-async/ahm-test/src/lib.rs new file mode 100644 index 0000000000000..faac7e7f7d500 --- /dev/null +++ b/substrate/frame/staking-async/ahm-test/src/lib.rs @@ -0,0 +1,479 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#[cfg(test)] +pub mod ah; +#[cfg(test)] +pub mod rc; + +#[cfg(test)] +pub mod shared; + +// shared tests. +#[cfg(test)] +mod tests { + use super::*; + use crate::rc::RootOffences; + use ah_client::OperatingMode; + use frame::testing_prelude::*; + use frame_support::traits::Get; + use pallet_election_provider_multi_block as multi_block; + use pallet_staking as staking_classic; + use pallet_staking_async::{ActiveEra, ActiveEraInfo, Forcing}; + use pallet_staking_async_ah_client as ah_client; + use pallet_staking_async_rc_client as rc_client; + + #[test] + fn rc_session_change_reported_to_ah() { + // sets up AH chain with current and active era. + shared::put_ah_state(ah::ExtBuilder::default().build()); + shared::put_rc_state(rc::ExtBuilder::default().build()); + // shared::RC_STATE.with(|state| *state.get_mut() = rc::ExtBuilder::default().build()); + + // initial state of ah + shared::in_ah(|| { + assert_eq!(frame_system::Pallet::::block_number(), 1); + assert_eq!(pallet_staking_async::CurrentEra::::get(), Some(0)); + assert_eq!( + ActiveEra::::get(), + Some(ActiveEraInfo { index: 0, start: Some(0) }) + ); + }); + + shared::in_rc(|| { + // initial state of rc + assert_eq!(ah_client::Mode::::get(), OperatingMode::Active); + // go to session 1 in RC and test. 
+ // when + assert!(frame_system::Pallet::::block_number() == 1); + + // given end session 0, start session 1, plan 2 + rc::roll_until_matches( + || pallet_session::CurrentIndex::::get() == 1, + true, + ); + + // then + assert_eq!(frame_system::Pallet::::block_number(), rc::Period::get()); + }); + + shared::in_rc(|| { + // roll a few more sessions + rc::roll_until_matches( + || pallet_session::CurrentIndex::::get() == 4, + true, + ); + }); + + shared::in_ah(|| { + // ah's rc-client has also progressed some blocks, equal to 4 sessions + assert_eq!(frame_system::Pallet::::block_number(), 120); + // election is ongoing, and has just started + assert!(matches!( + multi_block::CurrentPhase::::get(), + multi_block::Phase::Snapshot(_) + )); + }); + + // go to session 5 in rc, and forward AH too. + shared::in_rc(|| { + rc::roll_until_matches( + || pallet_session::CurrentIndex::::get() == 5, + true, + ); + }); + + // ah has bumped the current era, but not the active era + shared::in_ah(|| { + assert_eq!(pallet_staking_async::CurrentEra::::get(), Some(1)); + assert_eq!( + ActiveEra::::get(), + Some(ActiveEraInfo { index: 0, start: Some(0) }) + ); + }); + + // go to session 6 in rc, and forward AH too. + shared::in_rc(|| { + rc::roll_until_matches( + || pallet_session::CurrentIndex::::get() == 6, + true, + ); + }); + } + + #[test] + fn ah_takes_over_staking_post_migration() { + // SCENE (1): Pre AHM Migration + shared::put_rc_state( + rc::ExtBuilder::default() + .pre_migration() + // set session keys for all "potential" validators + .session_keys(vec![1, 2, 3, 4, 5, 6, 7, 8]) + .build(), + ); + shared::put_ah_state(ah::ExtBuilder::default().build()); + + shared::in_rc(|| { + assert!(staking_classic::ActiveEra::::get().is_none()); + + // - staking-classic is active on RC. 
+ rc::roll_until_matches( + || { + staking_classic::ActiveEra::::get().map(|a| a.index).unwrap_or(0) == + 1 + }, + true, + ); + + // No offence exist so far + assert!(staking_classic::UnappliedSlashes::::get(4).is_empty()); + + dbg!(pallet_session::Validators::::get()); + + assert_ok!(RootOffences::create_offence( + rc::RuntimeOrigin::root(), + vec![(2, Perbill::from_percent(100))], + None, + None + )); + + // offence is expected to be deferred to era 1 + 3 = 4 + assert_eq!(staking_classic::UnappliedSlashes::::get(4).len(), 1); + }); + + // nothing happened in ah-staking so far + shared::in_ah(|| { + // Ensure AH does not receive any + // - offences + // - session change reports. + assert_eq!(shared::CounterRCAHNewOffence::get(), 0); + assert_eq!(shared::CounterRCAHSessionReport::get(), 0); + + assert_eq!(ah::mock::staking_events_since_last_call(), vec![]); + }); + + // SCENE (2): AHM migration begins + let mut pre_migration_block_number = 0; + shared::in_rc(|| { + rc::roll_next(); + + let pre_migration_era_points = + staking_classic::ErasRewardPoints::::get(1).total; + + ah_client::Pallet::::on_migration_start(); + assert_eq!(ah_client::Mode::::get(), OperatingMode::Buffered); + + // get current session + let mut current_session = pallet_session::CurrentIndex::::get(); + pre_migration_block_number = frame_system::Pallet::::block_number(); + + // assume migration takes at least one era + // go forward by more than `SessionsPerEra` sessions -- staking will not rotate a new + // era. + rc::roll_until_matches( + || { + pallet_session::CurrentIndex::::get() == + current_session + ah::SessionsPerEra::get() + 1 + }, + true, + ); + current_session = pallet_session::CurrentIndex::::get(); + let migration_start_block_number = frame_system::Pallet::::block_number(); + + // ensure era is still 1 on RC. 
+ // (Session events are received by AHClient and never passed on to staking-classic once + // migration starts) + assert_eq!(staking_classic::ActiveEra::::get().unwrap().index, 1); + // no new era is planned + assert_eq!(staking_classic::CurrentEra::::get().unwrap(), 1); + + // no new block author points accumulated + assert_eq!( + staking_classic::ErasRewardPoints::::get(1).total, + pre_migration_era_points + ); + + // some validator points have been recorded in ah-client + assert_eq!( + ah_client::ValidatorPoints::::iter().count(), + 1, + "only 11 has authored blocks in rc" + ); + assert_eq!( + ah_client::ValidatorPoints::::get(&11), + (migration_start_block_number - pre_migration_block_number) as u32 * + <::PointsPerBlock as Get>::get() + ); + + // let's create a new offence. + assert_ok!(RootOffences::create_offence( + rc::RuntimeOrigin::root(), + vec![(5, Perbill::from_percent(100))], + None, + None, + )); + + // no new unapplied slashes are created (other than the previously created). + assert_eq!(staking_classic::UnappliedSlashes::::get(4).len(), 1); + + // there is a buffered offence in the AHClient. + assert_eq!(ah_client::BufferedOffences::::get().len(), 1); + assert_eq!( + ah_client::BufferedOffences::::get()[0], + ( + current_session, + vec![rc_client::Offence { + offender: 5, + reporters: vec![], + slash_fraction: Perbill::from_percent(100), + }], + ) + ); + }); + + // Ensure AH still does not receive any offence while migration is ongoing. + shared::in_ah(|| { + assert_eq!(shared::CounterRCAHNewOffence::get(), 0); + assert_eq!(shared::CounterRCAHSessionReport::get(), 0); + + assert_eq!(ah::mock::staking_events_since_last_call(), vec![]); + }); + + // let's migrate state from RC::staking-classic to AH::staking-async + shared::migrate_state(); + + // SCENE (3): AHM migration ends. 
+ shared::in_rc(|| { + ah_client::Pallet::::on_migration_end(); + assert_eq!(ah_client::Mode::::get(), OperatingMode::Active); + + // offence in the migration period is reported to AH. + assert_eq!(shared::CounterRCAHNewOffence::get(), 1); + }); + + let mut post_migration_era_reward_points = 0; + shared::in_ah(|| { + post_migration_era_reward_points = + pallet_staking_async::ErasRewardPoints::::get(1).total; + // staking async has always been in NotForcing, not doing anything since no session + // reports come in + assert_eq!(pallet_staking_async::ForceEra::::get(), Forcing::NotForcing); + + assert_eq!( + pallet_staking_async::OffenceQueue::::get(1, 5).unwrap(), + pallet_staking_async::slashing::OffenceRecord { + reporter: None, + reported_era: 1, + exposure_page: 0, + slash_fraction: Perbill::from_percent(100), + prior_slash_fraction: Perbill::from_percent(0), + } + ); + + // next block would process this offence + ah::roll_next(); + + assert_eq!( + ah::mock::staking_events_since_last_call(), + vec![ + pallet_staking_async::Event::OffenceReported { + offence_era: 1, + validator: 5, + fraction: Perbill::from_percent(100) + }, + pallet_staking_async::Event::SlashComputed { + offence_era: 1, + slash_era: 3, + offender: 5, + page: 0 + }, + ] + ); + + assert_eq!(pallet_staking_async::OffenceQueue::::get(1, 5), None); + // offence is deferred by two eras, ie 1 + 2 = 3. Note that this is one era less than + // staking-classic since slashing happens in multi-block, and we want to apply all + // slashes before the era 4 starts. + assert!(pallet_staking_async::UnappliedSlashes::::get( + 3, + (5, Perbill::from_percent(100), 0) + ) + .is_some()); + }); + + // NOW: lets verify we kick off the election at the appropriate time + shared::in_ah(|| { + // roll another block just to strongly prove election is not kicked off at the end of + // migration. 
+ ah::roll_next(); + + // ensure no election is kicked off yet + // (when election is kicked off, current_era = active_era + 1) + assert_eq!(pallet_staking_async::CurrentEra::::get(), Some(1)); + assert_eq!(pallet_staking_async::ActiveEra::::get().unwrap().index, 1); + // also no session report is sent to AH yet. + assert_eq!(shared::CounterRCAHSessionReport::get(), 0); + }); + + // It was more than 6 sessions since the last election, on RC, so an election is already + // overdue. The next session change should trigger an election. + + let mut post_migration_session_block_number = 0; + shared::in_rc(|| { + assert_eq!(pallet_session::CurrentIndex::::get(), 12); + rc::roll_until_matches( + || pallet_session::CurrentIndex::::get() == 13, + true, + ); + post_migration_session_block_number = + frame_system::Pallet::::block_number(); + + // all the buffered validators points are flushed + assert_eq!(ah_client::ValidatorPoints::::iter().count(), 0,); + }); + + // AH receives the session report. 
+ assert_eq!(shared::CounterRCAHSessionReport::get(), 1); + shared::in_ah(|| { + assert_eq!(pallet_staking_async::ActiveEra::::get().unwrap().index, 1); + assert_eq!(pallet_staking_async::CurrentEra::::get(), Some(1 + 1)); + + // by now one session report should have been received in staking + assert_eq!( + ah::rc_client_events_since_last_call(), + vec![ + rc_client::Event::OffenceReceived { slash_session: 12, offences_count: 1 }, + rc_client::Event::SessionReportReceived { + end_index: 12, + activation_timestamp: None, + validator_points_counts: 1, + leftover: false + } + ] + ); + + assert_eq!( + ah::mock::staking_events_since_last_call(), + vec![pallet_staking_async::Event::SessionRotated { + starting_session: 13, + active_era: 1, + planned_era: 2 + }] + ); + + // all expected era reward points are here + assert_eq!( + pallet_staking_async::ErasRewardPoints::::get(1).total, + ((post_migration_session_block_number - pre_migration_block_number) * 20) as u32 + + // --- ^^ these were buffered in ah-client + post_migration_era_reward_points // --- ^^ these were migrated as part of AHM + ); + + // ensure new validator is sent once election is complete. 
+ ah::roll_until_matches(|| shared::CounterAHRCValidatorSet::get() == 1, true); + + assert_eq!( + ah::staking_events_since_last_call(), + vec![ + pallet_staking_async::Event::PagedElectionProceeded { page: 2, result: Ok(4) }, + pallet_staking_async::Event::PagedElectionProceeded { page: 1, result: Ok(0) }, + pallet_staking_async::Event::PagedElectionProceeded { page: 0, result: Ok(0) } + ] + ); + }); + + shared::in_rc(|| { + assert_eq!( + rc::ah_client_events_since_last_call(), + vec![ah_client::Event::ValidatorSetReceived { + id: 2, + new_validator_set_count: 4, + prune_up_to: None, + leftover: false + }] + ); + + let (planned_era, next_validator_set) = + ah_client::ValidatorSet::::get().unwrap(); + + assert_eq!(planned_era, 2); + assert!(next_validator_set.len() >= rc::MinimumValidatorSetSize::get() as usize); + }); + + shared::in_ah(|| { + assert_eq!(pallet_staking_async::ActiveEra::::get().unwrap().index, 1); + // at next session, the validator set is queued but not applied yet. + ah::roll_until_matches(|| shared::CounterRCAHSessionReport::get() == 2, true); + // active era is still 1. + assert_eq!(pallet_staking_async::ActiveEra::::get().unwrap().index, 1); + // the following session, the validator set is applied. + ah::roll_until_matches(|| shared::CounterRCAHSessionReport::get() == 3, true); + assert_eq!(pallet_staking_async::ActiveEra::::get().unwrap().index, 2); + }); + } + + #[test] + fn election_result_on_ah_reported_to_rc() { + // when election result is complete + // staking stores all exposures + // validators reported to rc + // validators enacted for next session + } + + #[test] + fn rc_continues_with_same_validators_if_ah_is_late() { + // A test where ah is late to give us election result. 
+ } + + #[test] + fn authoring_points_reported_to_ah_per_session() {} + + #[test] + fn rc_is_late_to_report_session_change() {} + + #[test] + fn pruning_is_at_least_bonding_duration() {} + + #[test] + fn ah_eras_are_delayed() { + // rc will trigger new sessions, + // ah cannot start a new era (election fail) + // we don't prune anything, because era should not be increased. + } + + #[test] + fn ah_know_good_era_duration() { + // era duration and rewards work. + } + + #[test] + fn election_provider_fails_to_start() { + // call to ElectionProvider::start fails because it is already ongoing. What do we do? + } + + #[test] + fn overlapping_election() { + // while one election is ongoing, enough sessions pass that we think we should plan yet + // another era. + } + + #[test] + fn session_report_burst() { + // AH is offline for a while, and it suddenly receives 3 eras worth of session reports. What + // do we do? + } +} diff --git a/substrate/frame/staking-async/ahm-test/src/rc/mock.rs b/substrate/frame/staking-async/ahm-test/src/rc/mock.rs new file mode 100644 index 0000000000000..128d0a1150e12 --- /dev/null +++ b/substrate/frame/staking-async/ahm-test/src/rc/mock.rs @@ -0,0 +1,526 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use ah_client::OperatingMode; +use frame::{ + deps::sp_runtime::testing::UintAuthorityId, testing_prelude::*, traits::fungible::Mutate, +}; +use frame_election_provider_support::{ + bounds::{ElectionBounds, ElectionBoundsBuilder}, + onchain, SequentialPhragmen, +}; +use frame_support::traits::FindAuthor; +use pallet_staking_async_ah_client as ah_client; +use sp_staking::SessionIndex; + +use crate::shared; + +pub type T = Runtime; + +construct_runtime! { + pub enum Runtime { + System: frame_system, + Authorship: pallet_authorship, + Balances: pallet_balances, + Timestamp: pallet_timestamp, + + Session: pallet_session, + SessionHistorical: pallet_session::historical, + Staking: pallet_staking, + StakingAhClient: pallet_staking_async_ah_client, + RootOffences: pallet_root_offences, + } +} + +pub fn roll_next() { + let now = System::block_number(); + let next = now + 1; + + System::set_block_number(next); + // Timestamp is always the RC block number * 1000 + Timestamp::set_timestamp(next * 1000); + Authorship::on_initialize(next); + + Session::on_initialize(next); + StakingAhClient::on_initialize(next); + Staking::on_initialize(next); + Staking::on_finalize(next); +} + +parameter_types! { + /// The maximum number of blocks to roll before we stop rolling. + /// + /// Avoids infinite loops in tests. 
+ pub static MaxRollsUntilCriteria: u16 = 1000; +} + +pub fn roll_until_matches(criteria: impl Fn() -> bool, with_ah: bool) { + let mut rolls = 0; + while !criteria() { + roll_next(); + rolls += 1; + if with_ah { + if LocalQueue::get().is_some() { + panic!("when local queue is set, you cannot roll ah forward as well!") + } + shared::in_ah(|| { + crate::ah::roll_next(); + }); + } + + if rolls > MaxRollsUntilCriteria::get() { + panic!("rolled too many times"); + } + } +} + +pub type AccountId = ::AccountId; +pub type Balance = ::Balance; +pub type Hash = ::Hash; +pub type BlockNumber = BlockNumberFor; + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] +impl frame_system::Config for Runtime { + type Block = MockBlock; + type AccountData = pallet_balances::AccountData; +} + +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] +impl pallet_balances::Config for Runtime { + type Balance = u128; + type AccountStore = System; +} + +impl pallet_timestamp::Config for Runtime { + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = ConstU64<3>; + type WeightInfo = (); +} + +pub struct ValidatorIdOf; +impl Convert> for ValidatorIdOf { + fn convert(a: AccountId) -> Option { + Some(a) + } +} + +pub struct OtherSessionHandler; +impl OneSessionHandler for OtherSessionHandler { + type Key = UintAuthorityId; + + fn on_genesis_session<'a, I: 'a>(_: I) + where + I: Iterator, + AccountId: 'a, + { + } + + fn on_new_session<'a, I: 'a>(_: bool, _: I, _: I) + where + I: Iterator, + AccountId: 'a, + { + } + + fn on_disabled(_validator_index: u32) {} +} + +impl BoundToRuntimeAppPublic for OtherSessionHandler { + type Public = UintAuthorityId; +} + +frame::deps::sp_runtime::impl_opaque_keys! { + pub struct SessionKeys { + pub other: OtherSessionHandler, + } +} + +parameter_types! 
{ + pub static Period: BlockNumber = 30; + pub static Offset: BlockNumber = 0; +} + +impl pallet_session::historical::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type FullIdentification = sp_staking::Exposure; + type FullIdentificationOf = ah_client::DefaultExposureOf; +} + +impl pallet_session::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + + type ValidatorIdOf = ValidatorIdOf; + type ValidatorId = AccountId; + + type DisablingStrategy = pallet_session::disabling::UpToLimitDisablingStrategy<1>; + + type Keys = SessionKeys; + type SessionHandler = ::KeyTypeIdProviders; + + type NextSessionRotation = Self::ShouldEndSession; + type ShouldEndSession = pallet_session::PeriodicSessions; + + // Should be AH-client + type SessionManager = pallet_session::historical::NoteHistoricalRoot; + + type WeightInfo = (); +} + +parameter_types! { + pub static DefaultAuthor: Option = Some(11); +} + +pub struct GetAuthor; +impl FindAuthor for GetAuthor { + fn find_author<'a, I>(_digests: I) -> Option + where + I: 'a + IntoIterator, + { + DefaultAuthor::get() + } +} + +impl pallet_authorship::Config for Runtime { + type FindAuthor = GetAuthor; + type EventHandler = StakingAhClient; +} + +parameter_types! 
{ + pub static MaxBackersPerWinner: u32 = 256; + pub static MaxWinnersPerPage: u32 = 100; + pub static ElectionsBounds: ElectionBounds = ElectionBoundsBuilder::default().build(); +} +pub struct OnChainSeqPhragmen; +impl onchain::Config for OnChainSeqPhragmen { + type System = Runtime; + type Solver = SequentialPhragmen; + type DataProvider = Staking; + type WeightInfo = (); + type MaxBackersPerWinner = MaxBackersPerWinner; + type MaxWinnersPerPage = MaxWinnersPerPage; + type Bounds = ElectionsBounds; + type Sort = ConstBool; +} + +#[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)] +impl pallet_staking::Config for Runtime { + type OldCurrency = Balances; + type Currency = Balances; + type UnixTime = pallet_timestamp::Pallet; + type AdminOrigin = frame_system::EnsureRoot; + type EraPayout = (); + type ElectionProvider = onchain::OnChainExecution; + type GenesisElectionProvider = Self::ElectionProvider; + type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; + type TargetList = pallet_staking::UseValidatorsMap; + type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; + type SlashDeferDuration = ConstU32<2>; + type SessionInterface = Self; + type BondingDuration = ConstU32<3>; +} + +impl pallet_root_offences::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type OffenceHandler = StakingAhClient; +} + +#[derive(Clone, Debug, PartialEq)] +pub enum OutgoingMessages { + SessionReport(rc_client::SessionReport), + OffenceReport(SessionIndex, Vec>), +} + +parameter_types! 
{ + pub static MinimumValidatorSetSize: u32 = 4; + pub static LocalQueue: Option> = None; + pub static LocalQueueLastIndex: usize = 0; +} + +impl LocalQueue { + pub fn get_since_last_call() -> Vec<(BlockNumber, OutgoingMessages)> { + if let Some(all) = Self::get() { + let last = LocalQueueLastIndex::get(); + LocalQueueLastIndex::set(all.len()); + all.into_iter().skip(last).collect() + } else { + panic!("Must set local_queue()!") + } + } + + pub fn flush() { + let _ = Self::get_since_last_call(); + } +} + +impl ah_client::Config for Runtime { + type CurrencyBalance = Balance; + type AdminOrigin = EnsureRoot; + type SendToAssetHub = DeliverToAH; + type AssetHubOrigin = EnsureSigned; + type UnixTime = Timestamp; + type MinimumValidatorSetSize = MinimumValidatorSetSize; + type PointsPerBlock = ConstU32<20>; + type SessionInterface = Self; + type Fallback = Staking; +} + +use pallet_staking_async_rc_client::{self as rc_client, ValidatorSetReport}; +pub struct DeliverToAH; +impl ah_client::SendToAssetHub for DeliverToAH { + type AccountId = AccountId; + fn relay_new_offence( + session_index: SessionIndex, + offences: Vec>, + ) { + if let Some(mut local_queue) = LocalQueue::get() { + local_queue.push(( + System::block_number(), + OutgoingMessages::OffenceReport(session_index, offences), + )); + LocalQueue::set(Some(local_queue)); + } else { + shared::CounterRCAHNewOffence::mutate(|x| *x += 1); + shared::in_ah(|| { + let origin = crate::ah::RuntimeOrigin::root(); + rc_client::Pallet::::relay_new_offence( + origin, + session_index, + offences.clone(), + ) + .unwrap(); + }); + } + } + + fn relay_session_report(session_report: rc_client::SessionReport) { + if let Some(mut local_queue) = LocalQueue::get() { + local_queue + .push((System::block_number(), OutgoingMessages::SessionReport(session_report))); + LocalQueue::set(Some(local_queue)); + } else { + shared::CounterRCAHSessionReport::mutate(|x| *x += 1); + shared::in_ah(|| { + let origin = crate::ah::RuntimeOrigin::root(); 
+ rc_client::Pallet::::relay_session_report( + origin, + session_report.clone(), + ) + .unwrap(); + }); + } + } +} + +parameter_types! { + pub static SessionEventsIndex: usize = 0; + pub static HistoricalEventsIndex: usize = 0; + pub static AhClientEventsIndex: usize = 0; +} + +pub fn historical_events_since_last_call() -> Vec> { + let all = frame_system::Pallet::::read_events_for_pallet::< + pallet_session::historical::Event, + >(); + let seen = HistoricalEventsIndex::get(); + HistoricalEventsIndex::set(all.len()); + all.into_iter().skip(seen).collect() +} + +pub fn session_events_since_last_call() -> Vec> { + let all = + frame_system::Pallet::::read_events_for_pallet::>(); + let seen = SessionEventsIndex::get(); + SessionEventsIndex::set(all.len()); + all.into_iter().skip(seen).collect() +} + +pub fn ah_client_events_since_last_call() -> Vec> { + let all = + frame_system::Pallet::::read_events_for_pallet::>(); + let seen = AhClientEventsIndex::get(); + AhClientEventsIndex::set(all.len()); + all.into_iter().skip(seen).collect() +} + +const INITIAL_STAKE: Balance = 100; +const INITIAL_BALANCE: Balance = 1000; + +pub struct ExtBuilder { + session_keys: Vec, + pre_migration: bool, +} + +impl Default for ExtBuilder { + fn default() -> Self { + Self { session_keys: vec![], pre_migration: false } + } +} + +impl ExtBuilder { + /// Set this if you want to test the rc-runtime locally. This will push outgoing messages to + /// `LocalQueue` instead of enacting them on AH. + pub fn local_queue(self) -> Self { + LocalQueue::set(Some(Default::default())); + self + } + + /// Set the session keys for the given accounts. + pub fn session_keys(mut self, session_keys: Vec) -> Self { + self.session_keys = session_keys; + self + } + + /// Don't set 11 as the automatic block author of every block + pub fn no_default_author(self) -> Self { + DefaultAuthor::set(None); + self + } + + /// Set the staking-classic state to be pre-AHM-migration state. 
+ pub fn pre_migration(mut self) -> Self { + self.pre_migration = true; + self + } + + /// Set the smallest number of validators to be received by ah-client + pub fn minimum_validator_set_size(self, size: u32) -> Self { + MinimumValidatorSetSize::set(size); + self + } + + pub fn build(self) -> TestState { + let _ = sp_tracing::try_init_simple(); + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + + // add pre-migration state to staking-classic. + let operating_mode = if self.pre_migration { + let validators = vec![1, 2, 3, 4, 5, 6, 7, 8] + .into_iter() + .map(|x| (x, x, INITIAL_STAKE, pallet_staking::StakerStatus::Validator)); + + let nominators = vec![ + (100, vec![1, 2]), + (101, vec![2, 5]), + (102, vec![1, 1]), + (103, vec![3, 3]), + (104, vec![1, 5]), + (105, vec![5, 4]), + (106, vec![6, 2]), + (107, vec![1, 6]), + (108, vec![2, 7]), + (109, vec![4, 8]), + (110, vec![5, 2]), + (111, vec![6, 6]), + (112, vec![8, 1]), + ] + .into_iter() + .map(|(x, y)| (x, x, INITIAL_STAKE, pallet_staking_async::StakerStatus::Nominator(y))); + + let stakers = validators.chain(nominators).collect::>(); + let balances = stakers + .clone() + .into_iter() + .map(|(x, _, _, _)| (x, INITIAL_BALANCE)) + .collect::>(); + + pallet_balances::GenesisConfig:: { balances, ..Default::default() } + .assimilate_storage(&mut t) + .unwrap(); + + pallet_staking::GenesisConfig:: { + stakers, + validator_count: 4, + minimum_validator_count: 2, + ..Default::default() + } + .assimilate_storage(&mut t) + .unwrap(); + + // Set ah client in passive mode -> implies it is inactive and staking-classic is + // active. + OperatingMode::Passive + } else { + OperatingMode::Active + }; + + let mut state: TestState = t.into(); + + state.execute_with(|| { + // so events can be deposited. 
+ roll_next(); + + for v in self.session_keys { + // min some funds, create account and ref counts + pallet_balances::Pallet::::mint_into(&v, INITIAL_BALANCE).unwrap(); + pallet_session::Pallet::::set_keys( + RuntimeOrigin::signed(v), + SessionKeys { other: UintAuthorityId(v) }, + vec![], + ) + .unwrap(); + } + + ah_client::Mode::::put(operating_mode); + }); + + state + } +} + +/// Progress until `sessions`, receive a `new_validator_set` with `id`, and go forward to `sessions +/// + 1` such that it is queued in pallet-session. If `active`, then progress until `sessions + 2` +/// such that it is in the active session validators. +pub(crate) fn receive_validator_set_at( + sessions: SessionIndex, + id: u32, + new_validator_set: Vec, + activate: bool, +) { + roll_until_matches(|| pallet_session::CurrentIndex::::get() == sessions, false); + assert_eq!(pallet_session::CurrentIndex::::get(), sessions); + + let report = ValidatorSetReport { + id, + prune_up_to: None, + leftover: false, + new_validator_set: new_validator_set.clone(), + }; + + assert_ok!(ah_client::Pallet::::validator_set(RuntimeOrigin::root(), report)); + + // go forward till one more session such that these validators are in the session queue now + roll_until_matches(|| pallet_session::CurrentIndex::::get() == sessions + 1, false); + assert_eq!(pallet_session::CurrentIndex::::get(), sessions + 1); + + assert_eq!( + pallet_session::QueuedKeys::::get() + .into_iter() + .map(|(x, _)| x) + .collect::>(), + new_validator_set.clone(), + ); + + if activate { + // if need be go one more session to activate them + roll_until_matches( + || pallet_session::CurrentIndex::::get() == sessions + 2, + false, + ); + assert_eq!(pallet_session::Validators::::get(), new_validator_set); + } +} diff --git a/substrate/frame/staking-async/ahm-test/src/rc/mod.rs b/substrate/frame/staking-async/ahm-test/src/rc/mod.rs new file mode 100644 index 0000000000000..7a5aa43baf0df --- /dev/null +++ 
b/substrate/frame/staking-async/ahm-test/src/rc/mod.rs @@ -0,0 +1,22 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod mock; +pub mod test; + +// re-export for easier use in dual runtime tests. +pub use mock::*; diff --git a/substrate/frame/staking-async/ahm-test/src/rc/test.rs b/substrate/frame/staking-async/ahm-test/src/rc/test.rs new file mode 100644 index 0000000000000..32cabcd696514 --- /dev/null +++ b/substrate/frame/staking-async/ahm-test/src/rc/test.rs @@ -0,0 +1,1189 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::rc::mock::*; +use frame::testing_prelude::*; +use pallet_staking_async_ah_client::{self as ah_client, Mode, OperatingMode}; +use pallet_staking_async_rc_client::{ + self as rc_client, Offence, SessionReport, ValidatorSetReport, +}; + +// Tests that are specific to Relay Chain. +#[test] +fn send_session_report_no_election_comes_in() { + ExtBuilder::default().local_queue().build().execute_with(|| { + roll_until_matches(|| pallet_session::CurrentIndex::::get() == 10, false); + assert_eq!( + LocalQueue::get().unwrap(), + vec![ + ( + 30, + OutgoingMessages::SessionReport(SessionReport { + end_index: 0, + validator_points: vec![(11, 580)], + activation_timestamp: None, + leftover: false + }) + ), + ( + 60, + OutgoingMessages::SessionReport(SessionReport { + end_index: 1, + validator_points: vec![(11, 600)], + activation_timestamp: None, + leftover: false + }) + ), + ( + 90, + OutgoingMessages::SessionReport(SessionReport { + end_index: 2, + validator_points: vec![(11, 600)], + activation_timestamp: None, + leftover: false + }) + ), + ( + 120, + OutgoingMessages::SessionReport(SessionReport { + end_index: 3, + validator_points: vec![(11, 600)], + activation_timestamp: None, + leftover: false + }) + ), + ( + 150, + OutgoingMessages::SessionReport(SessionReport { + end_index: 4, + validator_points: vec![(11, 600)], + activation_timestamp: None, + leftover: false + }) + ), + ( + 180, + OutgoingMessages::SessionReport(SessionReport { + end_index: 5, + validator_points: vec![(11, 600)], + activation_timestamp: None, + leftover: false + }) + ), + ( + 210, + OutgoingMessages::SessionReport(SessionReport { + end_index: 6, + validator_points: vec![(11, 600)], + activation_timestamp: None, + leftover: false + }) + ), + ( + 240, + OutgoingMessages::SessionReport(SessionReport { + end_index: 7, + validator_points: vec![(11, 600)], + activation_timestamp: None, + leftover: false + }) + ), + ( + 270, + OutgoingMessages::SessionReport(SessionReport { + end_index: 8, + 
validator_points: vec![(11, 600)], + activation_timestamp: None, + leftover: false + }) + ), + ( + 300, + OutgoingMessages::SessionReport(SessionReport { + end_index: 9, + validator_points: vec![(11, 600)], + activation_timestamp: None, + leftover: false + }) + ) + ] + ); + }) +} + +#[test] +fn upon_receiving_election_queue_and_activate_next_session() { + ExtBuilder::default() + .session_keys(vec![1, 2, 3, 4, 5]) + .local_queue() + .no_default_author() + .build() + .execute_with(|| { + // roll 3 sessions, and then validator set comes + roll_until_matches(|| pallet_session::CurrentIndex::::get() == 3, false); + + assert_eq!( + session_events_since_last_call(), + vec![ + pallet_session::Event::::NewSession { session_index: 1 }, + pallet_session::Event::::NewSession { session_index: 2 }, + pallet_session::Event::::NewSession { session_index: 3 } + ] + ); + + assert_eq!( + LocalQueue::get_since_last_call(), + vec![ + ( + 30, + OutgoingMessages::SessionReport(SessionReport { + end_index: 0, + validator_points: vec![], + activation_timestamp: None, + leftover: false + }) + ), + ( + 60, + OutgoingMessages::SessionReport(SessionReport { + end_index: 1, + validator_points: vec![], + activation_timestamp: None, + leftover: false + }) + ), + ( + 90, + OutgoingMessages::SessionReport(SessionReport { + end_index: 2, + validator_points: vec![], + activation_timestamp: None, + leftover: false + }) + ), + ] + ); + + // current session validators are: + assert!(pallet_session::Validators::::get().is_empty()); + assert_eq!(pallet_session::QueuedChanged::::get(), false); + assert!(pallet_session::QueuedKeys::::get().is_empty()); + + // new validator set comes in. + let report = ValidatorSetReport { + id: 1, + prune_up_to: None, + leftover: false, + new_validator_set: vec![1, 2, 3, 4], + }; + + assert_ok!(ah_client::Pallet::::validator_set(RuntimeOrigin::root(), report)); + + // session validators are not set yet. 
+ assert!(pallet_session::Validators::::get().is_empty()); + assert_eq!(pallet_session::QueuedChanged::::get(), false); + assert!(pallet_session::QueuedKeys::::get().is_empty()); + + // rotate one more session + roll_until_matches(|| pallet_session::CurrentIndex::::get() == 4, false); + + // current validators are still the same + assert!(pallet_session::Validators::::get().is_empty()); + // queued has changed + assert_eq!(pallet_session::QueuedChanged::::get(), true); + assert_eq!( + pallet_session::QueuedKeys::::get() + .into_iter() + .map(|(x, _)| x) + .collect::>(), + vec![1, 2, 3, 4] + ); + + assert_eq!( + session_events_since_last_call(), + vec![ + pallet_session::Event::::NewQueued, + pallet_session::Event::::NewSession { session_index: 4 }, + ] + ); + + assert_eq!( + LocalQueue::get_since_last_call(), + vec![( + 120, + OutgoingMessages::SessionReport(SessionReport { + end_index: 3, + validator_points: vec![], + activation_timestamp: None, + leftover: false + }) + )] + ); + + // rotate one more session + roll_until_matches(|| pallet_session::CurrentIndex::::get() == 5, false); + // current validators have changed + assert_eq!(pallet_session::Validators::::get(), vec![1, 2, 3, 4]); + // queued is back to normal + assert_eq!(pallet_session::QueuedChanged::::get(), false); + assert_eq!( + pallet_session::QueuedKeys::::get() + .into_iter() + .map(|(x, _)| x) + .collect::>(), + vec![1, 2, 3, 4] + ); + assert_eq!( + session_events_since_last_call(), + vec![pallet_session::Event::::NewSession { session_index: 5 },] + ); + + assert_eq!( + LocalQueue::get_since_last_call(), + vec![( + 150, + OutgoingMessages::SessionReport(SessionReport { + end_index: 4, + validator_points: vec![], + activation_timestamp: Some((150000, 1)), + leftover: false + }) + ),] + ); + + // send another report, this time remove 4 and replace with 5 + // new validator set comes in. 
+ let report = ValidatorSetReport { + id: 2, + prune_up_to: None, + leftover: false, + new_validator_set: vec![1, 2, 3, 5], + }; + assert_ok!(ah_client::Pallet::::validator_set(RuntimeOrigin::root(), report)); + + // rotate one more session + roll_until_matches(|| pallet_session::CurrentIndex::::get() == 6, false); + + // current validators not changed + assert_eq!(pallet_session::Validators::::get(), vec![1, 2, 3, 4]); + // queued is set -- notice 5 is queued but not in the current set + assert_eq!(pallet_session::QueuedChanged::::get(), true); + assert_eq!( + pallet_session::QueuedKeys::::get() + .into_iter() + .map(|(x, _)| x) + .collect::>(), + vec![1, 2, 3, 5] + ); + assert_eq!( + session_events_since_last_call(), + vec![ + pallet_session::Event::::NewQueued, + pallet_session::Event::::NewSession { session_index: 6 }, + ] + ); + + assert_eq!( + LocalQueue::get_since_last_call(), + vec![( + 180, + OutgoingMessages::SessionReport(SessionReport { + end_index: 5, + validator_points: vec![], + activation_timestamp: None, + leftover: false + }) + )], + ); + + // rotate one more session + roll_until_matches(|| pallet_session::CurrentIndex::::get() == 7, false); + + // current validators changed + assert_eq!(pallet_session::Validators::::get(), vec![1, 2, 3, 5]); + assert_eq!(pallet_session::QueuedChanged::::get(), false); + assert_eq!( + pallet_session::QueuedKeys::::get() + .into_iter() + .map(|(x, _)| x) + .collect::>(), + vec![1, 2, 3, 5] + ); + assert_eq!( + session_events_since_last_call(), + vec![pallet_session::Event::::NewSession { session_index: 7 }] + ); + + assert_eq!( + LocalQueue::get_since_last_call(), + vec![( + 210, + OutgoingMessages::SessionReport(SessionReport { + end_index: 6, + validator_points: vec![], + activation_timestamp: Some((210000, 2)), + leftover: false + }) + )] + ); + }) +} + +#[test] +fn cleans_validator_points_upon_session_report() { + ExtBuilder::default().local_queue().build().execute_with(|| { + // given + 
ah_client::ValidatorPoints::::insert(1, 100); + ah_client::ValidatorPoints::::insert(2, 200); + + // when + roll_until_matches(|| pallet_session::CurrentIndex::::get() == 1, false); + + // then + assert_eq!( + LocalQueue::get().unwrap(), + vec![( + 30, + OutgoingMessages::SessionReport(SessionReport { + end_index: 0, + // first two are inserted by us, the other one by the test mock + validator_points: vec![(1, 100), (2, 200), (11, 580)], + activation_timestamp: None, + leftover: false + }) + ),] + ); + + // then it is drained. + assert!(!ah_client::ValidatorPoints::::contains_key(1)); + assert!(!ah_client::ValidatorPoints::::contains_key(2)); + }) +} + +#[test] +fn drops_too_small_validator_set() { + ExtBuilder::default().local_queue().build().execute_with(|| { + assert_eq!(MinimumValidatorSetSize::get(), 4); + let report = ValidatorSetReport { + id: 1, + prune_up_to: None, + leftover: false, + new_validator_set: vec![1], + }; + + // This will raise okay, but nothing is queued, and event is emitted + assert_ok!(ah_client::Pallet::::validator_set(RuntimeOrigin::root(), report),); + assert_eq!( + ah_client_events_since_last_call(), + vec![ah_client::Event::SetTooSmallAndDropped,] + ); + + assert!(ah_client::ValidatorSet::::get().is_none()); + assert!(ah_client::IncompleteValidatorSetReport::::get().is_none()); + }) +} + +#[test] +fn splitted_drops_too_small_validator_set() { + ExtBuilder::default().local_queue().build().execute_with(|| { + let parts = ValidatorSetReport { + id: 1, + prune_up_to: None, + leftover: true, + new_validator_set: vec![1, 2], + } + .split(1); + assert_eq!(parts.len(), 2); + + assert_ok!(ah_client::Pallet::::validator_set( + RuntimeOrigin::root(), + parts[0].clone() + )); + + assert_eq!( + ah_client_events_since_last_call(), + vec![ah_client::Event::ValidatorSetReceived { + id: 1, + new_validator_set_count: 1, + prune_up_to: None, + leftover: true + }] + ); + assert_eq!(ah_client::ValidatorSet::::get(), None); + 
assert!(ah_client::IncompleteValidatorSetReport::::get().is_some()); + + assert_ok!(ah_client::Pallet::::validator_set( + RuntimeOrigin::root(), + parts[1].clone() + )); + + assert_eq!( + ah_client_events_since_last_call(), + vec![ah_client::Event::SetTooSmallAndDropped] + ); + assert_eq!(ah_client::ValidatorSet::::get(), None); + assert!(ah_client::IncompleteValidatorSetReport::::get().is_none()); + }) +} + +#[test] +fn on_offence_non_validator() { + ExtBuilder::default() + .local_queue() + .session_keys(vec![1, 2, 3, 4]) + .build() + .execute_with(|| { + receive_validator_set_at(3, 1, vec![1, 2, 3, 4], true); + assert_eq!(pallet_session::CurrentIndex::::get(), 5); + + // flush some relevant data + LocalQueue::flush(); + let _ = session_events_since_last_call(); + + // submit an offence for validator 5 in current session, which is not a validator + // really. Note that we have to provide a manual identification, as the default one + // won't work here. + assert_ok!(pallet_root_offences::Pallet::::create_offence( + RuntimeOrigin::root(), + vec![(5, Perbill::from_percent(50))], + Some(vec![Default::default()]), + None + )); + + // we nonetheless have sent the offence report to AH + assert_eq!( + LocalQueue::get_since_last_call(), + vec![( + 150, + OutgoingMessages::OffenceReport( + 5, + vec![Offence { + offender: 5, + reporters: vec![], + slash_fraction: Perbill::from_percent(50) + }] + ) + )] + ); + + // no disabling has happened in session + assert_eq!(session_events_since_last_call(), vec![]); + }) +} + +#[test] +fn on_offence_non_validator_and_active() { + ExtBuilder::default() + .local_queue() + .session_keys(vec![1, 2, 3, 4]) + .build() + .execute_with(|| { + receive_validator_set_at(3, 1, vec![1, 2, 3, 4], true); + assert_eq!(pallet_session::CurrentIndex::::get(), 5); + + // flush some relevant data + LocalQueue::flush(); + let _ = session_events_since_last_call(); + + // submit an offence for 5 and 4, first a non-validator and second an active one. 
+ assert_ok!(pallet_root_offences::Pallet::::create_offence( + RuntimeOrigin::root(), + vec![(4, Perbill::from_percent(50)), (5, Perbill::from_percent(50))], + Some(vec![Default::default(), Default::default()]), + None + )); + + // we nonetheless have sent the offence report to AH + assert_eq!( + LocalQueue::get_since_last_call(), + vec![( + 150, + OutgoingMessages::OffenceReport( + 5, + vec![ + Offence { + offender: 4, + reporters: vec![], + slash_fraction: Perbill::from_percent(50) + }, + Offence { + offender: 5, + reporters: vec![], + slash_fraction: Perbill::from_percent(50) + } + ] + ) + )] + ); + + // one validator has been disabled in session + assert_eq!( + session_events_since_last_call(), + vec![pallet_session::Event::ValidatorDisabled { validator: 4 }] + ); + }) +} + +#[test] +fn wont_disable_past_session_offence() { + ExtBuilder::default() + .local_queue() + .session_keys(vec![1, 2, 3, 4]) + .minimum_validator_set_size(1) + .build() + .execute_with(|| { + // receive 1, 2 at 3, activate them + receive_validator_set_at(3, 1, vec![1, 2], true); + assert_eq!(pallet_session::CurrentIndex::::get(), 5); + + // receive 3, 4 at 6, activate them + receive_validator_set_at(6, 2, vec![3, 4], true); + assert_eq!(pallet_session::CurrentIndex::::get(), 8); + + // flush some relevant data + LocalQueue::flush(); + let _ = session_events_since_last_call(); + + // submit an offence for 1, who is a past validator, in a past session. 
+ assert_ok!(pallet_root_offences::Pallet::::create_offence( + RuntimeOrigin::root(), + vec![(1, Perbill::from_percent(50))], + Some(vec![Default::default()]), + Some(5) + )); + + // we nonetheless have sent the offence report to AH + assert_eq!( + LocalQueue::get_since_last_call(), + vec![( + 240, + OutgoingMessages::OffenceReport( + 5, + vec![Offence { + offender: 1, + reporters: vec![], + slash_fraction: Perbill::from_percent(50) + },] + ) + )] + ); + + // no one disabled in session + assert_eq!(session_events_since_last_call(), vec![]); + }) +} + +#[test] +fn on_offence_disable_and_re_enabled_next_set() { + ExtBuilder::default() + .local_queue() + .session_keys(vec![1, 2, 3, 4]) + .build() + .execute_with(|| { + receive_validator_set_at(3, 1, vec![1, 2, 3, 4], true); + assert_eq!(pallet_session::CurrentIndex::::get(), 5); + + // flush some relevant data + LocalQueue::flush(); + let _ = session_events_since_last_call(); + + // submit an offence for 4 in the current session + assert_ok!(pallet_root_offences::Pallet::::create_offence( + RuntimeOrigin::root(), + vec![(4, Perbill::from_percent(50))], + Some(vec![Default::default()]), + None + )); + + // offence dispatched to AH + assert_eq!( + LocalQueue::get_since_last_call(), + vec![( + 150, + OutgoingMessages::OffenceReport( + 5, + vec![Offence { + offender: 4, + reporters: vec![], + slash_fraction: Perbill::from_percent(50) + },] + ) + )] + ); + + // session disables 4 + assert_eq!( + session_events_since_last_call(), + vec![pallet_session::Event::ValidatorDisabled { validator: 4 }] + ); + assert_eq!( + pallet_session::DisabledValidators::::get() + .into_iter() + .map(|(x, _)| x) + .collect::>(), + vec![3] + ); + + // now receive the same validator set, again + receive_validator_set_at(6, 2, vec![1, 2, 3, 4], true); + assert_eq!(pallet_session::CurrentIndex::::get(), 8); + + // events related to session rotation + assert_eq!( + session_events_since_last_call(), + vec![ + pallet_session::Event::NewSession { 
session_index: 6 }, + pallet_session::Event::NewQueued, + pallet_session::Event::NewSession { session_index: 7 }, + pallet_session::Event::NewSession { session_index: 8 } + ] + ); + + // disabled validators is now gone + assert!(pallet_session::DisabledValidators::::get().is_empty()); + }); +} + +mod session_pruning { + use super::*; + + #[test] + fn stores_and_prunes_old_validator_set_trie() { + ExtBuilder::default() + .session_keys((1..100).collect::>()) + .local_queue() + .build() + .execute_with(|| { + // initially, no historical data + assert_eq!(pallet_session::historical::StoredRange::::get(), None); + + // forward 10 sessions, and each one set 10 different validators + for i in 1..=10 { + let session_validators = + (i * 10..(i + 1) * 10).map(|x| x as AccountId).collect::>(); + assert_ok!(ah_client::Pallet::::validator_set( + RuntimeOrigin::root(), + ValidatorSetReport { + id: i, + prune_up_to: None, + leftover: false, + new_validator_set: session_validators.clone(), + }, + )); + + roll_until_matches(|| pallet_session::CurrentIndex::::get() == i, false); + assert_eq!( + session_events_since_last_call(), + vec![ + pallet_session::Event::::NewQueued, + pallet_session::Event::::NewSession { session_index: i }, + ] + ); + assert_eq!( + historical_events_since_last_call(), + vec![pallet_session::historical::Event::::RootStored { index: i + 1 }] + ) + } + + // ensure that we have the root for these recorded in the historical session pallet + assert_eq!(pallet_session::historical::StoredRange::::get(), Some((2, 12))); + + // send back a new validator set, but with some pruning info. 
+ assert_ok!(ah_client::Pallet::::validator_set( + RuntimeOrigin::root(), + ValidatorSetReport { + id: 999, + prune_up_to: Some(5), + leftover: false, + new_validator_set: vec![1, 2, 3, 4], + }, + )); + + assert_eq!(pallet_session::historical::StoredRange::::get(), Some((5, 12))); + assert_eq!( + historical_events_since_last_call(), + vec![pallet_session::historical::Event::::RootsPruned { up_to: 5 }] + ); + }) + } +} + +mod blocking { + use super::*; + + #[test] + fn drops_incoming_if_passive_mode() { + ExtBuilder::default().local_queue().pre_migration().build().execute_with(|| { + // given + let report = ValidatorSetReport { + id: 1, + prune_up_to: None, + leftover: false, + new_validator_set: vec![1, 2, 3, 4], + }; + + // when + assert_noop!( + ah_client::Pallet::::validator_set(RuntimeOrigin::root(), report), + ah_client::Error::::Blocked, + ); + + // then + assert_eq!(ah_client::ValidatorSet::::get(), None); + }) + } + + #[test] + fn drops_outgoing_if_passive_mode() { + ExtBuilder::default().local_queue().pre_migration().build().execute_with(|| { + // roll 5 sessions + roll_until_matches(|| pallet_session::CurrentIndex::::get() == 5, false); + + // nothing is queued; No outgoing messages expected in passive mode. + assert_eq!(LocalQueue::get().unwrap(), vec![]); + + // make pallet active + Mode::::put(OperatingMode::Active); + + // roll another session + roll_until_matches(|| pallet_session::CurrentIndex::::get() == 6, false); + + // now session report is queued. 
+ assert_eq!( + LocalQueue::get().unwrap(), + vec![( + 180, + OutgoingMessages::SessionReport(SessionReport { + end_index: 5, + validator_points: vec![(11, 600)], + activation_timestamp: None, + leftover: false, + }) + )] + ); + }) + } +} + +mod splitting { + use super::*; + + #[test] + fn can_split_and_merge_session_report() { + ExtBuilder::default().local_queue().build().execute_with(|| { + let full_report = SessionReport { + activation_timestamp: None, + end_index: 0, + leftover: false, + validator_points: vec![(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)], + }; + + // Split by 0 + assert_eq!( + full_report + .clone() + .split(0) + .into_iter() + .map(|r| r.validator_points) + .collect::>(), + vec![vec![(1, 1)], vec![(2, 2)], vec![(3, 3)], vec![(4, 4)], vec![(5, 5)]] + ); + + // Split by 1 is noop + assert_eq!( + full_report + .clone() + .split(1) + .into_iter() + .map(|r| r.validator_points) + .collect::>(), + vec![vec![(1, 1)], vec![(2, 2)], vec![(3, 3)], vec![(4, 4)], vec![(5, 5)]] + ); + + // split by 2 + assert_eq!( + full_report + .clone() + .split(2) + .into_iter() + .map(|r| r.validator_points) + .collect::>(), + vec![vec![(1, 1), (2, 2)], vec![(3, 3), (4, 4)], vec![(5, 5)]] + ); + + // split by 3 + assert_eq!( + full_report + .clone() + .split(3) + .into_iter() + .map(|r| r.validator_points) + .collect::>(), + vec![vec![(1, 1), (2, 2), (3, 3)], vec![(4, 4), (5, 5)]] + ); + + // split by 4 + assert_eq!( + full_report + .clone() + .split(4) + .into_iter() + .map(|r| r.validator_points) + .collect::>(), + vec![vec![(1, 1), (2, 2), (3, 3), (4, 4)], vec![(5, 5)]] + ); + + // split by 5 + assert_eq!( + full_report + .clone() + .split(5) + .into_iter() + .map(|r| r.validator_points) + .collect::>(), + vec![vec![(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)]] + ); + + // split by 6 + assert_eq!( + full_report + .clone() + .split(6) + .into_iter() + .map(|r| r.validator_points) + .collect::>(), + vec![vec![(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)]] + ); + }) + } + + #[test] + 
fn splitting_and_merging_equal() { + let full_report = ValidatorSetReport { + new_validator_set: vec![1, 2, 3, 4, 5], + id: 0, + prune_up_to: None, + leftover: false, + }; + + for c in 1..=6 { + assert_eq!( + full_report.clone().split(c).into_iter().reduce(|acc, x| acc.merge(x).unwrap()), + Some(full_report.clone()) + ); + } + } + + #[test] + fn can_split_and_merge_validator_set_report() { + ExtBuilder::default().local_queue().build().execute_with(|| { + let full_report = ValidatorSetReport { + new_validator_set: vec![1, 2, 3, 4, 5], + id: 0, + prune_up_to: None, + leftover: false, + }; + + // Split by 0 + assert_eq!( + full_report + .clone() + .split(0) + .into_iter() + .map(|r| r.new_validator_set) + .collect::>(), + vec![vec![1], vec![2], vec![3], vec![4], vec![5]] + ); + + // Split by 1 is noop + assert_eq!( + full_report + .clone() + .split(1) + .into_iter() + .map(|r| r.new_validator_set) + .collect::>(), + vec![vec![1], vec![2], vec![3], vec![4], vec![5]] + ); + + // split by 2 + assert_eq!( + full_report + .clone() + .split(2) + .into_iter() + .map(|r| r.new_validator_set) + .collect::>(), + vec![vec![1, 2], vec![3, 4], vec![5]] + ); + + // split by 3 + assert_eq!( + full_report + .clone() + .split(3) + .into_iter() + .map(|r| r.new_validator_set) + .collect::>(), + vec![vec![1, 2, 3], vec![4, 5]] + ); + + // split by 4 + assert_eq!( + full_report + .clone() + .split(4) + .into_iter() + .map(|r| r.new_validator_set) + .collect::>(), + vec![vec![1, 2, 3, 4], vec![5]] + ); + + // split by 5 + assert_eq!( + full_report + .clone() + .split(5) + .into_iter() + .map(|r| r.new_validator_set) + .collect::>(), + vec![vec![1, 2, 3, 4, 5]] + ); + + // split by 6 + assert_eq!( + full_report + .clone() + .split(6) + .into_iter() + .map(|r| r.new_validator_set) + .collect::>(), + vec![vec![1, 2, 3, 4, 5]] + ); + }) + } + + #[test] + fn can_handle_splitted_validator_set() { + ExtBuilder::default().local_queue().build().execute_with(|| { + let full_report = 
ValidatorSetReport { + new_validator_set: vec![1, 2, 3, 4, 5, 6], + id: 0, + prune_up_to: None, + leftover: false, + }; + let splitted = full_report.split(2); + let incomplete0 = splitted[0].clone(); + let incomplete1 = splitted[1].clone(); + let complete = splitted[2].clone(); + + assert!(incomplete0.leftover); + assert!(incomplete1.leftover); + assert!(!complete.leftover); + + // nothing is queued. + assert!(ah_client::IncompleteValidatorSetReport::::get().is_none()); + assert!(ah_client::ValidatorSet::::get().is_none()); + + // when + assert_ok!(StakingAhClient::validator_set(RuntimeOrigin::root(), incomplete0.clone())); + assert_eq!( + ah_client_events_since_last_call(), + vec![ah_client::Event::::ValidatorSetReceived { + id: 0, + new_validator_set_count: 2, + prune_up_to: None, + leftover: true + }] + ); + + // then + assert_eq!( + ah_client::IncompleteValidatorSetReport::::get() + .map(|r| r.new_validator_set), + Some(vec![1, 2]) + ); + assert!(ah_client::ValidatorSet::::get().is_none()); + + // when + assert_ok!(StakingAhClient::validator_set(RuntimeOrigin::root(), incomplete1.clone())); + assert_eq!( + ah_client_events_since_last_call(), + vec![ah_client::Event::::ValidatorSetReceived { + id: 0, + new_validator_set_count: 4, + prune_up_to: None, + leftover: true + }] + ); + + // then + assert_eq!( + ah_client::IncompleteValidatorSetReport::::get() + .map(|r| r.new_validator_set), + Some(vec![1, 2, 3, 4]) + ); + assert!(ah_client::ValidatorSet::::get().is_none()); + + // when + assert_ok!(StakingAhClient::validator_set(RuntimeOrigin::root(), complete.clone())); + assert_eq!( + ah_client_events_since_last_call(), + vec![ah_client::Event::::ValidatorSetReceived { + id: 0, + new_validator_set_count: 6, + prune_up_to: None, + leftover: false + }] + ); + + // then + assert_eq!(ah_client::IncompleteValidatorSetReport::::get(), None); + assert_eq!( + ah_client::ValidatorSet::::get(), + Some((0, vec![1, 2, 3, 4, 5, 6])) + ); + }) + } + + #[test] + fn 
incomplete_wrong_id_dropped() { + ExtBuilder::default().local_queue().build().execute_with(|| { + let incomplete0 = rc_client::ValidatorSetReport { + id: 0, + new_validator_set: vec![1, 2], + leftover: true, + prune_up_to: None, + }; + let broken = rc_client::ValidatorSetReport { + id: 1, + new_validator_set: vec![3, 4], + leftover: true, + prune_up_to: None, + }; + + // nothing is queued. + assert!(ah_client::IncompleteValidatorSetReport::::get().is_none()); + assert!(ah_client::ValidatorSet::::get().is_none()); + + // when + assert_ok!(StakingAhClient::validator_set(RuntimeOrigin::root(), incomplete0.clone())); + + // then + assert_eq!( + ah_client::IncompleteValidatorSetReport::::get() + .map(|r| r.new_validator_set), + Some(vec![1, 2]) + ); + assert!(ah_client::ValidatorSet::::get().is_none()); + + // when + assert_ok!(StakingAhClient::validator_set(RuntimeOrigin::root(), broken.clone())); + // then + assert_eq!(ah_client::IncompleteValidatorSetReport::::get(), None); + assert!(ah_client::ValidatorSet::::get().is_none()); + + assert_eq!( + frame_system::Pallet::::read_events_for_pallet::>( + ), + vec![ + ah_client::Event::::ValidatorSetReceived { + id: 0, + new_validator_set_count: 2, + prune_up_to: None, + leftover: true + }, + ah_client::Event::::CouldNotMergeAndDropped + ] + ); + }) + } +} + +#[cfg(test)] +mod key_proofs { + use frame::traits::KeyOwnerProofSystem; + use frame_support::sp_runtime; + + use super::*; + + #[test] + #[ignore = "not complete yet"] + fn can_generate_valid_latest_key_ownership_proof() { + ExtBuilder::default() + .local_queue() + .session_keys(vec![1, 2, 3, 4]) + .build() + .execute_with(|| { + // no sessions exists, cannot generate any proofs + assert_eq!(pallet_session::historical::StoredRange::::get(), None); + assert_eq!(pallet_session::CurrentIndex::::get(), 0); + + // receive a validator set, and trigger a 3 new sessions, such that we store some + // roots. 
+ assert_ok!(ah_client::Pallet::::validator_set( + RuntimeOrigin::root(), + ValidatorSetReport { + id: 0, + prune_up_to: None, + leftover: false, + new_validator_set: vec![1, 2, 3, 4], + }, + )); + roll_until_matches(|| pallet_session::CurrentIndex::::get() == 3, false); + + assert_eq!( + historical_events_since_last_call(), + vec![ + pallet_session::historical::Event::RootStored { index: 2 }, + pallet_session::historical::Event::RootStored { index: 3 }, + pallet_session::historical::Event::RootStored { index: 4 } + ] + ); + + assert_eq!(pallet_session::CurrentIndex::::get(), 3); + assert_eq!(pallet_session::historical::StoredRange::::get(), Some((2, 5))); + + // generate the proof for one of the validators + use sp_runtime::{key_types::DUMMY, testing::UintAuthorityId, traits::OpaqueKeys}; + + let key_ids = ::key_ids(); + assert_eq!(key_ids.len(), 1, "we have inserted only one key type in mock"); + + let keys = pallet_session::Pallet::::load_keys(&1).unwrap(); + let our_key = keys.get::(key_ids[0]); + assert_eq!(key_ids[0], DUMMY); + + let proof = + pallet_session::historical::Pallet::::prove((DUMMY, &our_key.encode()[..])) + .unwrap(); + + assert_eq!(proof.session, 3); + assert_eq!(proof.validator_count, 4); + + // proof is valid, and it results into a default exposure. + assert_eq!( + pallet_session::historical::Pallet::::check_proof( + (DUMMY, &our_key.encode()[..]), + proof + ) + .unwrap(), + (1, sp_staking::Exposure::default()) + ) + }) + } +} diff --git a/substrate/frame/staking-async/ahm-test/src/shared.rs b/substrate/frame/staking-async/ahm-test/src/shared.rs new file mode 100644 index 0000000000000..c85cffdf9ad64 --- /dev/null +++ b/substrate/frame/staking-async/ahm-test/src/shared.rs @@ -0,0 +1,121 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::*; +use frame::testing_prelude::*; +use std::cell::UnsafeCell; + +thread_local! { + pub static RC_STATE: UnsafeCell = UnsafeCell::new(Default::default()); + pub static AH_STATE: UnsafeCell = UnsafeCell::new(Default::default()); +} + +parameter_types! { + // counts how many times a new offence message is sent from RC -> AH. + pub static CounterRCAHNewOffence: u32 = 0; + // counts how many times a new session report is sent from RC -> AH. + pub static CounterRCAHSessionReport: u32 = 0; + // counts how many times a validator set is sent to RC. + pub static CounterAHRCValidatorSet: u32 = 0; +} + +pub fn put_ah_state(ah: TestState) { + AH_STATE.with(|state| unsafe { + let ptr = state.get(); + *ptr = ah; + }) +} + +pub fn in_ah(f: impl FnMut() -> ()) { + AH_STATE.with(|state| unsafe { + let ptr = state.get(); + (*ptr).execute_with(f) + }) +} + +pub fn put_rc_state(rc: TestState) { + RC_STATE.with(|state| unsafe { + let ptr = state.get(); + *ptr = rc; + }) +} + +pub fn in_rc(f: impl FnMut() -> ()) { + RC_STATE.with(|state| unsafe { + let ptr = state.get(); + (*ptr).execute_with(f) + }) +} + +pub fn migrate_state() { + // NOTE: this is not exhaustive, only migrates the state that is needed for the tests. 
+ shared::in_rc(|| { + let current_era = pallet_staking::CurrentEra::::take(); + let active_era = pallet_staking::ActiveEra::::take().unwrap(); + shared::in_ah(|| { + pallet_staking_async::CurrentEra::::set(current_era); + pallet_staking_async::ActiveEra::::set(Some( + pallet_staking_async::ActiveEraInfo { + index: active_era.index, + start: active_era.start, + }, + )); + }); + + for (era, reward_points) in pallet_staking::ErasRewardPoints::::drain() { + shared::in_ah(|| { + pallet_staking_async::ErasRewardPoints::::insert( + era, + pallet_staking_async::EraRewardPoints { + total: reward_points.total, + individual: reward_points.individual.clone(), + }, + ) + }); + } + + // exposure + for (era, account, overview) in pallet_staking::ErasStakersOverview::::drain() + { + shared::in_ah(|| { + pallet_staking_async::ErasStakersOverview::::insert( + era, account, overview, + ) + }); + } + + for ((era, account, page), exposure_page) in + pallet_staking::ErasStakersPaged::::drain() + { + shared::in_ah(|| { + pallet_staking_async::ErasStakersPaged::::insert( + (era, account, page), + exposure_page.clone(), + ) + }); + } + + for (era, session_index) in pallet_staking::ErasStartSessionIndex::::drain() { + shared::in_ah(|| { + pallet_staking_async::ErasStartSessionIndex::::insert( + era, + session_index, + ) + }); + } + }) +} diff --git a/substrate/frame/staking-async/rc-client/Cargo.toml b/substrate/frame/staking-async/rc-client/Cargo.toml new file mode 100644 index 0000000000000..c555cfd9a6471 --- /dev/null +++ b/substrate/frame/staking-async/rc-client/Cargo.toml @@ -0,0 +1,43 @@ +[package] +name = "pallet-staking-async-rc-client" +description = "Pallet handling the communication with staking-ah-client. It's role is to glue the staking pallet (on AssetHub chain) and session pallet (on Relay Chain) in a transparent way." 
+license = "Apache-2.0" +version = "0.1.0" +edition.workspace = true +authors.workspace = true +repository.workspace = true + +[dependencies] +codec = { workspace = true, features = ["derive"] } +frame-support = { workspace = true } +frame-system = { workspace = true } +impl-trait-for-tuples = "0.2.2" +log = { workspace = true } +scale-info = { workspace = true, features = ["derive"] } +sp-core = { workspace = true } +sp-runtime = { features = ["serde"], workspace = true } +sp-staking = { features = ["serde"], workspace = true } + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-support/std", + "frame-system/std", + "log/std", + "scale-info/std", + "sp-core/std", + "sp-runtime/std", + "sp-staking/std", +] +runtime-benchmarks = [ + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", + "sp-staking/runtime-benchmarks", +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "sp-runtime/try-runtime", +] diff --git a/substrate/frame/staking-async/rc-client/src/lib.rs b/substrate/frame/staking-async/rc-client/src/lib.rs new file mode 100644 index 0000000000000..db09a4baa388c --- /dev/null +++ b/substrate/frame/staking-async/rc-client/src/lib.rs @@ -0,0 +1,520 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
The client for the relay chain, intended to be used in AssetHub. +//! +//! The counter-part for this pallet is `pallet-staking-async-ah-client` on the relay chain. +//! +//! This documentation is divided into the following sections: +//! +//! 1. Incoming messages: the messages that we receive from the relay chian. +//! 2. Outgoing messages: the messaged that we sent to the relay chain. +//! 3. Local interfaces: the interfaces that we expose to other pallets in the runtime. +//! +//! ## Incoming Messages +//! +//! All incoming messages are handled via [`Call`]. They are all gated to be dispatched only by the +//! relay chain origin, as per [`Config::RelayChainOrigin`]. +//! +//! After potential queuing, they are passed to pallet-staking-async via [`AHStakingInterface`]. +//! +//! The calls are: +//! +//! * [`Call::relay_session_report`]: A report from the relay chain, indicating the end of a +//! session. We allow ourselves to know an implementation detail: **The ending of session `x` +//! always implies start of session `x+1` and planning of session `x+2`.** This allows us to have +//! just one message per session. +//! +//! > Note that in the code, due to historical reasons, planning of a new session is called +//! > `new_session`. +//! +//! * [`Call::relay_new_offence`]: A report of one or more offences on the relay chain. +//! +//! ## Outgoing Messages +//! +//! The outgoing messages are expressed in [`SendToRelayChain`]. +//! +//! ## Local Interfaces +//! +//! Within this pallet, we need to talk to the staking-async pallet in AH. This is done via +//! [`AHStakingInterface`] trait. +//! +//! The staking pallet in AH has no communication with session pallet whatsoever, therefore its +//! implementation of `SessionManager`, and it associated type `SessionInterface` no longer exists. +//! Moreover, pallet-staking-async no longer has a notion of timestamp locally, and only relies in +//! the timestamp passed in in the `SessionReport`. +//! +//! ## Shared Types +//! 
+//! Note that a number of types need to be shared between this crate and `ah-client`. For now, as a +//! convention, they are kept in this crate. This can later be decoupled into a shared crate, or +//! `sp-staking`. +//! +//! TODO: the rest should go to staking-async docs. +//! +//! ## Session Change +//! +//! Further details of how the session change works follows. These details are important to how +//! `pallet-staking-async` should rotate sessions/eras going forward. +//! +//! ### Synchronous Model +//! +//! Let's first consider the old school model, when staking and session lived in the same runtime. +//! Assume 3 sessions is one era. +//! +//! The session pallet issues the following events: +//! +//! end_session / start_session / new_session (plan session) +//! +//! * end 0, start 1, plan 2 +//! * end 1, start 2, plan 3 (new validator set returned) +//! * end 2, start 3 (new validator set activated), plan 4 +//! * end 3, start 4, plan 5 +//! * end 4, start 5, plan 6 (ah-client to already return validator set) and so on. +//! +//! Staking should then do the following: +//! +//! * once a request to plan session 3 comes in, it must return a validator set. This is queued +//! internally in the session pallet, and is enacted later. +//! * at the same time, staking increases its notion of `current_era` by 1. Yet, `active_era` is +//! intact. This is because the validator elected for era n+1 are not yet active in the session +//! pallet. +//! * once a request to _start_ session 3 comes in, staking will rotate its `active_era` to also be +//! incremented to n+1. +//! +//! ### Asynchronous Model +//! +//! Now, if staking lives in AH and the session pallet lives in the relay chain, how will this look +//! like? +//! +//! Staking knows that by the time the relay-chain session index `3` (and later on `6` and so on) is +//! _planned_, it must have already returned a validator set via XCM. +//! +//! conceptually, staking must: +//! +//! 
- listen to the [`SessionReport`]s coming in, and start a new staking election such that we can +//! be sure it is delivered to the RC well before the the message for planning session 3 received. +//! - Staking should know that, regardless of the timing, these validators correspond to session 3, +//! and an upcoming era. +//! - Staking will keep these pending validators internally within its state. +//! - Once the message to start session 3 is received, staking will act upon it locally. + +#![cfg_attr(not(feature = "std"), no_std)] + +extern crate alloc; +use alloc::vec::Vec; +use frame_support::pallet_prelude::*; +use sp_runtime::Perbill; +use sp_staking::SessionIndex; + +/// Export everything needed for the pallet to be used in the runtime. +pub use pallet::*; + +const LOG_TARGET: &str = "runtime::staking-async::rc-client"; + +// syntactic sugar for logging. +#[macro_export] +macro_rules! log { + ($level:tt, $patter:expr $(, $values:expr)* $(,)?) => { + log::$level!( + target: $crate::LOG_TARGET, + concat!("[{:?}] ⬆️ ", $patter), >::block_number() $(, $values)* + ) + }; +} + +/// The communication trait of `pallet-staking-async-rc-client` -> `relay-chain`. +/// +/// This trait should only encapsulate our _outgoing_ communication to the RC. Any incoming +/// communication comes it directly via our calls. +/// +/// In a real runtime, this is implemented via XCM calls, much like how the core-time pallet works. +/// In a test runtime, it can be wired to direct function calls. +pub trait SendToRelayChain { + /// The validator account ids. + type AccountId; + + /// Send a new validator set report to relay chain. + fn validator_set(report: ValidatorSetReport); +} + +#[derive(Encode, Decode, DecodeWithMemTracking, Debug, Clone, PartialEq, TypeInfo)] +/// A report about a new validator set. This is sent from AH -> RC. +pub struct ValidatorSetReport { + /// The new validator set. + pub new_validator_set: Vec, + /// The id of this validator set. 
+ /// + /// Is an always incrementing identifier for this validator set, the activation of which can be + /// later pointed to in a `SessionReport`. + /// + /// Implementation detail: within `pallet-staking-async`, this is always set to the + /// `planning-era` (aka. `CurrentEra`). + pub id: u32, + /// Signal the relay chain that it can prune up to this session, and enough eras have passed. + /// + /// This can always have a safety buffer. For example, whatever is a sane value, it can be + /// `value - 5`. + pub prune_up_to: Option, + /// Same semantics as [`SessionReport::leftover`]. + pub leftover: bool, +} + +impl ValidatorSetReport { + /// A new instance of self that is terminal. This is useful when we want to send everything in + /// one go. + pub fn new_terminal( + new_validator_set: Vec, + id: u32, + prune_up_to: Option, + ) -> Self { + Self { new_validator_set, id, prune_up_to, leftover: false } + } + + /// Merge oneself with another instance. + pub fn merge(mut self, other: Self) -> Result { + if self.id != other.id || self.prune_up_to != other.prune_up_to { + // Must be some bug -- don't merge. + return Err(UnexpectedKind::ValidatorSetIntegrityFailed); + } + self.new_validator_set.extend(other.new_validator_set); + self.leftover = other.leftover; + Ok(self) + } + + /// Split self into `count` number of pieces. + pub fn split(self, chunk_size: usize) -> Vec + where + AccountId: Clone, + { + let splitted_points = self.new_validator_set.chunks(chunk_size.max(1)).map(|x| x.to_vec()); + let mut parts = splitted_points + .into_iter() + .map(|new_validator_set| Self { new_validator_set, leftover: true, ..self }) + .collect::>(); + if let Some(x) = parts.last_mut() { + x.leftover = false + } + parts + } +} + +#[derive( + Encode, Decode, DecodeWithMemTracking, Debug, Clone, PartialEq, TypeInfo, MaxEncodedLen, +)] +/// The information that is sent from RC -> AH on session end. +pub struct SessionReport { + /// The session that is ending. 
+ /// + /// This always implies start of `end_index + 1`, and planning of `end_index + 2`. + pub end_index: SessionIndex, + /// All of the points that validators have accumulated. + /// + /// This can be either from block authoring, or from parachain consensus, or anything else. + pub validator_points: Vec<(AccountId, u32)>, + /// If none, it means no new validator set was activated as a part of this session. + /// + /// If `Some((timestamp, id))`, it means that the new validator set was activated at the given + /// timestamp, and the id of the validator set is `id`. + /// + /// This `id` is what was previously communicated to the RC as a part of + /// [`ValidatorSetReport::id`]. + pub activation_timestamp: Option<(u64, u32)>, + /// If this session report is self-contained, then it is false. + /// + /// If this session report has some leftover, it should not be acted upon until a subsequent + /// message with `leftover = true` comes in. The client pallets should handle this queuing. + /// + /// This is in place to future proof us against possibly needing to send multiple rounds of + /// messages to convey all of the `validator_points`. + /// + /// Upon processing, this should always be true, and it should be ignored. + pub leftover: bool, +} + +impl SessionReport { + /// A new instance of self that is terminal. This is useful when we want to send everything in + /// one go. + pub fn new_terminal( + end_index: SessionIndex, + validator_points: Vec<(AccountId, u32)>, + activation_timestamp: Option<(u64, u32)>, + ) -> Self { + Self { end_index, validator_points, activation_timestamp, leftover: false } + } + + /// Merge oneself with another instance. + pub fn merge(mut self, other: Self) -> Result { + if self.end_index != other.end_index || + self.activation_timestamp != other.activation_timestamp + { + // Must be some bug -- don't merge. 
+ return Err(UnexpectedKind::SessionReportIntegrityFailed); + } + self.validator_points.extend(other.validator_points); + self.leftover = other.leftover; + Ok(self) + } + + /// Split oneself into `count` number of pieces. + pub fn split(self, chunk_size: usize) -> Vec + where + AccountId: Clone, + { + let splitted_points = self.validator_points.chunks(chunk_size.max(1)).map(|x| x.to_vec()); + let mut parts = splitted_points + .into_iter() + .map(|validator_points| Self { validator_points, leftover: true, ..self }) + .collect::>(); + if let Some(x) = parts.last_mut() { + x.leftover = false + } + parts + } +} + +/// Our communication trait of `pallet-staking-async-rc-client` -> `pallet-staking-async`. +/// +/// This is merely a shorthand to avoid tightly-coupling the staking pallet to this pallet. It +/// limits what we can say to `pallet-staking-async` to only these functions. +pub trait AHStakingInterface { + /// The validator account id type. + type AccountId; + /// Maximum number of validators that the staking system may have. + type MaxValidatorSet: Get; + + /// New session report from the relay chain. + fn on_relay_session_report(report: SessionReport); + + /// Report one or more offences on the relay chain. + /// + /// This returns its consumed weight because its complexity is hard to measure. + fn on_new_offences(slash_session: SessionIndex, offences: Vec>); +} + +/// The communication trait of `pallet-staking-async` -> `pallet-staking-async-rc-client`. +pub trait RcClientInterface { + /// The validator account ids. + type AccountId; + + /// Report a new validator set. + fn validator_set(new_validator_set: Vec, id: u32, prune_up_tp: Option); +} + +/// An offence on the relay chain. Based on [`sp_staking::offence::OffenceDetails`]. +#[derive(Encode, Decode, DecodeWithMemTracking, Debug, Clone, PartialEq, TypeInfo)] +pub struct Offence { + /// The offender. + pub offender: AccountId, + /// Those who have reported this offence. 
+ pub reporters: Vec, + /// The amount that they should be slashed. + pub slash_fraction: Perbill, +} + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use alloc::vec; + use frame_system::pallet_prelude::*; + + /// The in-code storage version. + const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); + + /// An incomplete incoming session report that we have not acted upon yet. + // Note: this can remain unbounded, as the internals of `AHStakingInterface` is benchmarked, and + // is worst case. + #[pallet::storage] + #[pallet::unbounded] + pub type IncompleteSessionReport = + StorageValue<_, SessionReport, OptionQuery>; + + /// The last session report's `end_index` that we have acted upon. + /// + /// This allows this pallet to ensure a sequentially increasing sequence of session reports + /// passed to staking. + /// + /// Note that with the XCM being the backbone of communication, we have a guarantee on the + /// ordering of messages. As long as the RC sends session reports in order, we _eventually_ + /// receive them in the same correct order as well. + #[pallet::storage] + pub type LastSessionReportEndingIndex = StorageValue<_, SessionIndex, OptionQuery>; + + #[pallet::pallet] + #[pallet::storage_version(STORAGE_VERSION)] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config { + /// An origin type that allows us to be sure a call is being dispatched by the relay chain. + /// + /// It be can be configured to something like `Root` or relay chain or similar. + type RelayChainOrigin: EnsureOrigin; + + /// Our communication handle to the local staking pallet. + type AHStakingInterface: AHStakingInterface; + + /// Our communication handle to the relay chain. + type SendToRelayChain: SendToRelayChain; + } + + #[pallet::event] + #[pallet::generate_deposit(pub(crate) fn deposit_event)] + pub enum Event { + /// A said session report was received. 
+ SessionReportReceived { + end_index: SessionIndex, + activation_timestamp: Option<(u64, u32)>, + validator_points_counts: u32, + leftover: bool, + }, + /// A new offence was reported. + OffenceReceived { slash_session: SessionIndex, offences_count: u32 }, + /// Something occurred that should never happen under normal operation. + /// Logged as an event for fail-safe observability. + Unexpected(UnexpectedKind), + } + + /// Represents unexpected or invariant-breaking conditions encountered during execution. + /// + /// These variants are emitted as [`Event::Unexpected`] and indicate a defensive check has + /// failed. While these should never occur under normal operation, they are useful for + /// diagnosing issues in production or test environments. + #[derive(Clone, Encode, Decode, DecodeWithMemTracking, PartialEq, TypeInfo, RuntimeDebug)] + pub enum UnexpectedKind { + /// We could not merge the chunks, and therefore dropped the session report. + SessionReportIntegrityFailed, + /// We could not merge the chunks, and therefore dropped the validator set. + ValidatorSetIntegrityFailed, + } + + #[pallet::error] + pub enum Error { + /// The session report was not valid, due to a bad end index. + SessionIndexNotValid, + } + + impl RcClientInterface for Pallet { + type AccountId = T::AccountId; + + fn validator_set( + new_validator_set: Vec, + id: u32, + prune_up_tp: Option, + ) { + let report = ValidatorSetReport::new_terminal(new_validator_set, id, prune_up_tp); + T::SendToRelayChain::validator_set(report); + } + } + + #[pallet::call] + impl Pallet { + /// Called to indicate the start of a new session on the relay chain. 
+ #[pallet::call_index(0)] + #[pallet::weight( + // `LastSessionReportEndingIndex`: rw + // `IncompleteSessionReport`: rw + // NOTE: what happens inside `AHStakingInterface` is benchmarked and registered in `pallet-staking-async` + T::DbWeight::get().reads_writes(2, 2) + )] + pub fn relay_session_report( + origin: OriginFor, + report: SessionReport, + ) -> DispatchResult { + log!(info, "Received session report: {:?}", report); + T::RelayChainOrigin::ensure_origin_or_root(origin)?; + + match LastSessionReportEndingIndex::::get() { + None => { + // first session report post genesis, okay. + }, + Some(last) if report.end_index == last + 1 => { + // incremental -- good + }, + Some(incorrect) => { + log!( + error, + "Session report end index is not valid. last_index={:?}, report.index={:?}", + incorrect, + report.end_index + ); + // NOTE: we may want to set ourself to a blocked mode at this point. + return Err(Error::::SessionIndexNotValid.into()); + }, + } + + Self::deposit_event(Event::SessionReportReceived { + end_index: report.end_index, + activation_timestamp: report.activation_timestamp, + validator_points_counts: report.validator_points.len() as u32, + leftover: report.leftover, + }); + + // If we have anything previously buffered, then merge it. + let maybe_new_session_report = match IncompleteSessionReport::::take() { + Some(old) => old.merge(report.clone()), + None => Ok(report), + }; + + if let Err(e) = maybe_new_session_report { + Self::deposit_event(Event::Unexpected(e)); + debug_assert!( + IncompleteSessionReport::::get().is_none(), + "we have ::take() it above, we don't want to keep the old data" + ); + return Ok(()); + } + let new_session_report = maybe_new_session_report.expect("checked above; qed"); + + if new_session_report.leftover { + // this is still not final -- buffer it. + IncompleteSessionReport::::put(new_session_report); + } else { + // this is final, report it. 
+ LastSessionReportEndingIndex::::put(new_session_report.end_index); + T::AHStakingInterface::on_relay_session_report(new_session_report); + } + + Ok(()) + } + + /// Called to report one or more new offenses on the relay chain. + #[pallet::call_index(1)] + #[pallet::weight( + // `on_new_offences` is benchmarked by `pallet-staking-async` + // events are free + // origin check is negligible. + Weight::default() + )] + pub fn relay_new_offence( + origin: OriginFor, + slash_session: SessionIndex, + offences: Vec>, + ) -> DispatchResult { + log!(info, "Received new offence at slash_session: {:?}", slash_session); + T::RelayChainOrigin::ensure_origin_or_root(origin)?; + + Self::deposit_event(Event::OffenceReceived { + slash_session, + offences_count: offences.len() as u32, + }); + + T::AHStakingInterface::on_new_offences(slash_session, offences); + Ok(()) + } + } +} diff --git a/substrate/frame/staking-async/reward-fn/Cargo.toml b/substrate/frame/staking-async/reward-fn/Cargo.toml new file mode 100644 index 0000000000000..e9acca1d312ae --- /dev/null +++ b/substrate/frame/staking-async/reward-fn/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "pallet-staking-async-reward-fn" +version = "19.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true +description = "Reward function for FRAME staking pallet" + +[lints] +workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[lib] + +[dependencies] +log = { workspace = true } +sp-arithmetic = { workspace = true } + +[features] +default = ["std"] +std = ["log/std", "sp-arithmetic/std"] diff --git a/substrate/frame/staking-async/reward-fn/src/lib.rs b/substrate/frame/staking-async/reward-fn/src/lib.rs new file mode 100644 index 0000000000000..d34a534c0425d --- /dev/null +++ b/substrate/frame/staking-async/reward-fn/src/lib.rs @@ -0,0 +1,224 @@ +// This file is part of Substrate. 
+ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![cfg_attr(not(feature = "std"), no_std)] + +//! Useful function for inflation for nominated proof of stake. + +use sp_arithmetic::{ + biguint::BigUint, + traits::{SaturatedConversion, Zero}, + PerThing, Perquintill, +}; + +/// Compute yearly inflation using function +/// +/// ```ignore +/// I(x) = for x between 0 and x_ideal: x / x_ideal, +/// for x between x_ideal and 1: 2^((x_ideal - x) / d) +/// ``` +/// +/// where: +/// * x is the stake rate, i.e. fraction of total issued tokens that actively staked behind +/// validators. +/// * d is the falloff or `decay_rate` +/// * x_ideal: the ideal stake rate. +/// +/// The result is meant to be scaled with minimum inflation and maximum inflation. +/// +/// (as detailed +/// [here](https://research.web3.foundation/Polkadot/overview/token-economics#inflation-model-with-parachains)) +/// +/// Arguments are: +/// * `stake`: The fraction of total issued tokens that actively staked behind validators. Known as +/// `x` in the literature. Must be between 0 and 1. +/// * `ideal_stake`: The fraction of total issued tokens that should be actively staked behind +/// validators. Known as `x_ideal` in the literature. Must be between 0 and 1. +/// * `falloff`: Known as `decay_rate` in the literature. 
A co-efficient dictating the strength of +/// the global incentivization to get the `ideal_stake`. A higher number results in less typical +/// inflation at the cost of greater volatility for validators. Must be more than 0.01. +pub fn compute_inflation(stake: P, ideal_stake: P, falloff: P) -> P { + if stake < ideal_stake { + // ideal_stake is more than 0 because it is strictly more than stake + return stake / ideal_stake + } + + if falloff < P::from_percent(1.into()) { + log::error!("Invalid inflation computation: falloff less than 1% is not supported"); + return PerThing::zero() + } + + let accuracy = { + let mut a = BigUint::from(Into::::into(P::ACCURACY)); + a.lstrip(); + a + }; + + let mut falloff = BigUint::from(falloff.deconstruct().into()); + falloff.lstrip(); + + let ln2 = { + /// `ln(2)` expressed in as perquintillionth. + const LN2: u64 = 0_693_147_180_559_945_309; + let ln2 = P::from_rational(LN2.into(), Perquintill::ACCURACY.into()); + BigUint::from(ln2.deconstruct().into()) + }; + + // falloff is stripped above. + let ln2_div_d = div_by_stripped(ln2.mul(&accuracy), &falloff); + + let inpos_param = INPoSParam { + x_ideal: BigUint::from(ideal_stake.deconstruct().into()), + x: BigUint::from(stake.deconstruct().into()), + accuracy, + ln2_div_d, + }; + + let res = compute_taylor_serie_part(&inpos_param); + + match u128::try_from(res.clone()) { + Ok(res) if res <= Into::::into(P::ACCURACY) => P::from_parts(res.saturated_into()), + // If result is beyond bounds there is nothing we can do + _ => { + log::error!("Invalid inflation computation: unexpected result {:?}", res); + P::zero() + }, + } +} + +/// Internal struct holding parameter info alongside other cached value. +/// +/// All expressed in part from `accuracy` +struct INPoSParam { + ln2_div_d: BigUint, + x_ideal: BigUint, + x: BigUint, + /// Must be stripped and have no leading zeros. + accuracy: BigUint, +} + +/// Compute `2^((x_ideal - x) / d)` using taylor serie. 
+/// +/// x must be strictly more than x_ideal. +/// +/// result is expressed with accuracy `INPoSParam.accuracy` +fn compute_taylor_serie_part(p: &INPoSParam) -> BigUint { + // The last computed taylor term. + let mut last_taylor_term = p.accuracy.clone(); + + // Whereas taylor sum is positive. + let mut taylor_sum_positive = true; + + // The sum of all taylor term. + let mut taylor_sum = last_taylor_term.clone(); + + for k in 1..300 { + last_taylor_term = compute_taylor_term(k, &last_taylor_term, p); + + if last_taylor_term.is_zero() { + break + } + + let last_taylor_term_positive = k % 2 == 0; + + if taylor_sum_positive == last_taylor_term_positive { + taylor_sum = taylor_sum.add(&last_taylor_term); + } else if taylor_sum >= last_taylor_term { + taylor_sum = taylor_sum + .sub(&last_taylor_term) + // NOTE: Should never happen as checked above + .unwrap_or_else(|e| e); + } else { + taylor_sum_positive = !taylor_sum_positive; + taylor_sum = last_taylor_term + .clone() + .sub(&taylor_sum) + // NOTE: Should never happen as checked above + .unwrap_or_else(|e| e); + } + } + + if !taylor_sum_positive { + return BigUint::zero() + } + + taylor_sum.lstrip(); + taylor_sum +} + +/// Return the absolute value of k-th taylor term of `2^((x_ideal - x))/d` i.e. +/// `((x - x_ideal) * ln(2) / d)^k / k!` +/// +/// x must be strictly more x_ideal. +/// +/// We compute the term from the last term using this formula: +/// +/// `((x - x_ideal) * ln(2) / d)^k / k! 
== previous_term * (x - x_ideal) * ln(2) / d / k` +/// +/// `previous_taylor_term` and result are expressed with accuracy `INPoSParam.accuracy` +fn compute_taylor_term(k: u32, previous_taylor_term: &BigUint, p: &INPoSParam) -> BigUint { + let x_minus_x_ideal = + p.x.clone() + .sub(&p.x_ideal) + // NOTE: Should never happen, as x must be more than x_ideal + .unwrap_or_else(|_| BigUint::zero()); + + let res = previous_taylor_term.clone().mul(&x_minus_x_ideal).mul(&p.ln2_div_d).div_unit(k); + + // p.accuracy is stripped by definition. + let res = div_by_stripped(res, &p.accuracy); + let mut res = div_by_stripped(res, &p.accuracy); + + res.lstrip(); + res +} + +/// Compute a div b. +/// +/// requires `b` to be stripped and have no leading zeros. +fn div_by_stripped(mut a: BigUint, b: &BigUint) -> BigUint { + a.lstrip(); + + if b.len() == 0 { + log::error!("Computation error: Invalid division"); + return BigUint::zero() + } + + if b.len() == 1 { + return a.div_unit(b.checked_get(0).unwrap_or(1)) + } + + if b.len() > a.len() { + return BigUint::zero() + } + + if b.len() == a.len() { + // 100_000^2 is more than 2^32-1, thus `new_a` has more limbs than `b`. + let mut new_a = a.mul(&BigUint::from(100_000u64.pow(2))); + new_a.lstrip(); + + debug_assert!(new_a.len() > b.len()); + return new_a + .div(b, false) + .map(|res| res.0) + .unwrap_or_else(BigUint::zero) + .div_unit(100_000) + .div_unit(100_000) + } + + a.div(b, false).map(|res| res.0).unwrap_or_else(BigUint::zero) +} diff --git a/substrate/frame/staking-async/reward-fn/tests/test.rs b/substrate/frame/staking-async/reward-fn/tests/test.rs new file mode 100644 index 0000000000000..9f2383ab574aa --- /dev/null +++ b/substrate/frame/staking-async/reward-fn/tests/test.rs @@ -0,0 +1,101 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use sp_arithmetic::{PerThing, PerU16, Perbill, Percent, Perquintill}; + +/// This test the precision and panics if error too big error. +/// +/// error is asserted to be less or equal to 8/accuracy or 8*f64::EPSILON +fn test_precision(stake: P, ideal_stake: P, falloff: P) { + let accuracy_f64 = Into::::into(P::ACCURACY) as f64; + let res = pallet_staking_async_reward_fn::compute_inflation(stake, ideal_stake, falloff); + let res = Into::::into(res.deconstruct()) as f64 / accuracy_f64; + + let expect = float_i_npos(stake, ideal_stake, falloff); + + let error = (res - expect).abs(); + + if error > 8f64 / accuracy_f64 && error > 8.0 * f64::EPSILON { + panic!( + "stake: {:?}, ideal_stake: {:?}, falloff: {:?}, res: {}, expect: {}", + stake, ideal_stake, falloff, res, expect + ); + } +} + +/// compute the inflation using floats +fn float_i_npos(stake: P, ideal_stake: P, falloff: P) -> f64 { + let accuracy_f64 = Into::::into(P::ACCURACY) as f64; + + let ideal_stake = Into::::into(ideal_stake.deconstruct()) as f64 / accuracy_f64; + let stake = Into::::into(stake.deconstruct()) as f64 / accuracy_f64; + let falloff = Into::::into(falloff.deconstruct()) as f64 / accuracy_f64; + + let x_ideal = ideal_stake; + let x = stake; + let d = falloff; + + if x < x_ideal { + x / x_ideal + } else { + 2_f64.powf((x_ideal - x) / d) + } +} + +#[test] +fn test_precision_for_minimum_falloff() { + 
fn test_falloff_precision_for_minimum_falloff() { + for stake in 0..1_000 { + let stake = P::from_rational(stake, 1_000); + let ideal_stake = P::zero(); + let falloff = P::from_rational(1, 100); + test_precision(stake, ideal_stake, falloff); + } + } + + test_falloff_precision_for_minimum_falloff::(); + + test_falloff_precision_for_minimum_falloff::(); + + test_falloff_precision_for_minimum_falloff::(); + + test_falloff_precision_for_minimum_falloff::(); +} + +#[test] +fn compute_inflation_works() { + fn compute_inflation_works() { + for stake in 0..100 { + for ideal_stake in 0..10 { + for falloff in 1..10 { + let stake = P::from_rational(stake, 100); + let ideal_stake = P::from_rational(ideal_stake, 10); + let falloff = P::from_rational(falloff, 100); + test_precision(stake, ideal_stake, falloff); + } + } + } + } + + compute_inflation_works::(); + + compute_inflation_works::(); + + compute_inflation_works::(); + + compute_inflation_works::(); +} diff --git a/substrate/frame/staking-async/runtime-api/Cargo.toml b/substrate/frame/staking-async/runtime-api/Cargo.toml new file mode 100644 index 0000000000000..eefb6024378a1 --- /dev/null +++ b/substrate/frame/staking-async/runtime-api/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "pallet-staking-async-runtime-api" +version = "14.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +homepage.workspace = true +repository.workspace = true +description = "RPC runtime API for staking FRAME pallet" +readme = "README.md" + +[lints] +workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { features = ["derive"], workspace = true } +sp-api = { workspace = true } +sp-staking = { workspace = true } + +[features] +default = ["std"] +std = ["codec/std", "sp-api/std", "sp-staking/std"] diff --git a/substrate/frame/staking-async/runtime-api/README.md b/substrate/frame/staking-async/runtime-api/README.md new file mode 100644 index
0000000000000..a999e519f8cbf --- /dev/null +++ b/substrate/frame/staking-async/runtime-api/README.md @@ -0,0 +1,3 @@ +Runtime API definition for the staking pallet. + +License: Apache-2.0 diff --git a/substrate/frame/staking-async/runtime-api/src/lib.rs b/substrate/frame/staking-async/runtime-api/src/lib.rs new file mode 100644 index 0000000000000..7955f4184a434 --- /dev/null +++ b/substrate/frame/staking-async/runtime-api/src/lib.rs @@ -0,0 +1,39 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Runtime API definition for the staking pallet. + +#![cfg_attr(not(feature = "std"), no_std)] + +use codec::Codec; + +sp_api::decl_runtime_apis! { + pub trait StakingApi + where + Balance: Codec, + AccountId: Codec, + { + /// Returns the nominations quota for a nominator with a given balance. + fn nominations_quota(balance: Balance) -> u32; + + /// Returns the page count of exposures for a validator `account` in a given era. + fn eras_stakers_page_count(era: sp_staking::EraIndex, account: AccountId) -> sp_staking::Page; + + /// Returns true if validator `account` has pages to be claimed for the given era. 
+ fn pending_rewards(era: sp_staking::EraIndex, account: AccountId) -> bool; + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/.gitignore b/substrate/frame/staking-async/runtimes/parachain/.gitignore new file mode 100644 index 0000000000000..a6c57f5fb2ffb --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/.gitignore @@ -0,0 +1 @@ +*.json diff --git a/substrate/frame/staking-async/runtimes/parachain/Cargo.toml b/substrate/frame/staking-async/runtimes/parachain/Cargo.toml new file mode 100644 index 0000000000000..202a1f31a2b44 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/Cargo.toml @@ -0,0 +1,368 @@ +[package] +name = "pallet-staking-async-parachain-runtime" +version = "0.15.0" +authors.workspace = true +edition.workspace = true +description = "A parachain runtime for staking-async" +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +codec = { features = ["derive", "max-encoded-len"], workspace = true } +hex-literal = { workspace = true, default-features = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +serde_json = { features = ["alloc"], workspace = true } + +# Substrate +frame-benchmarking = { optional = true, workspace = true } +frame-election-provider-support = { workspace = true } +frame-executive = { workspace = true } +frame-metadata-hash-extension = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +frame-system-benchmarking = { optional = true, workspace = true } +frame-system-rpc-runtime-api = { workspace = true } +frame-try-runtime = { optional = true, workspace = true } +pallet-asset-conversion = { workspace = true } +pallet-asset-conversion-ops = { workspace = true } +pallet-asset-conversion-tx-payment = { workspace = true } +pallet-asset-rate = { workspace = true } +pallet-asset-rewards = { workspace = true } +pallet-assets = 
{ workspace = true } +pallet-assets-freezer = { workspace = true } +pallet-aura = { workspace = true } +pallet-authorship = { workspace = true } +pallet-bags-list = { workspace = true } +pallet-balances = { workspace = true } +pallet-conviction-voting = { workspace = true } +pallet-delegated-staking = { workspace = true } +pallet-election-provider-multi-block = { workspace = true } +pallet-fast-unstake = { workspace = true } +pallet-migrations = { workspace = true } +pallet-multisig = { workspace = true } +pallet-nft-fractionalization = { workspace = true } +pallet-nfts = { workspace = true } +pallet-nfts-runtime-api = { workspace = true } +pallet-nomination-pools = { workspace = true } +pallet-nomination-pools-runtime-api = { workspace = true } +pallet-parameters = { workspace = true } +pallet-preimage = { workspace = true } +pallet-proxy = { workspace = true } +pallet-referenda = { workspace = true } +pallet-scheduler = { workspace = true } +pallet-session = { workspace = true } +pallet-staking-async = { workspace = true } +pallet-staking-async-rc-client = { workspace = true } +pallet-staking-async-runtime-api = { workspace = true } +pallet-state-trie-migration = { workspace = true } +pallet-sudo = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } +pallet-treasury = { workspace = true } +pallet-uniques = { workspace = true } +pallet-utility = { workspace = true } +pallet-vesting = { workspace = true } +pallet-whitelist = { workspace = true } +sp-api = { workspace = true } +sp-arithmetic = { workspace = true } +sp-block-builder = { workspace = true } +sp-consensus-aura = { workspace = true } +sp-core = { workspace = true } +sp-genesis-builder = { workspace = true } +sp-inherents = { workspace = true } +sp-keyring = { workspace = true } +sp-npos-elections = { workspace = true } +sp-offchain = { workspace = true } +sp-runtime = { 
workspace = true } +sp-session = { workspace = true } +sp-staking = { workspace = true } +sp-std = { workspace = true } +sp-storage = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-version = { workspace = true } + +# num-traits feature needed for dex integer sq root: +primitive-types = { features = ["codec", "num-traits", "scale-info"], workspace = true } + +# Polkadot +pallet-xcm = { workspace = true } +pallet-xcm-benchmarks = { optional = true, workspace = true } +polkadot-parachain-primitives = { workspace = true } +polkadot-runtime-common = { workspace = true } +westend-runtime-constants = { workspace = true } +xcm = { workspace = true } +xcm-builder = { workspace = true } +xcm-executor = { workspace = true } +xcm-runtime-apis = { workspace = true } + +# Cumulus +assets-common = { workspace = true } +cumulus-pallet-aura-ext = { workspace = true } +cumulus-pallet-parachain-system = { workspace = true } +cumulus-pallet-session-benchmarking = { workspace = true } +cumulus-pallet-weight-reclaim = { workspace = true } +cumulus-pallet-xcm = { workspace = true } +cumulus-pallet-xcmp-queue = { features = ["bridging"], workspace = true } +cumulus-primitives-aura = { workspace = true } +cumulus-primitives-core = { workspace = true } +cumulus-primitives-utility = { workspace = true } +pallet-collator-selection = { workspace = true } +pallet-message-queue = { workspace = true } +parachain-info = { workspace = true } +parachains-common = { workspace = true } +testnet-parachains-constants = { features = ["westend"], workspace = true } + +# Bridges +bp-asset-hub-rococo = { workspace = true } +# bp-asset-hub-next-westend = { workspace = true } +bp-bridge-hub-rococo = { workspace = true } +bp-bridge-hub-westend = { workspace = true } +pallet-xcm-bridge-hub-router = { workspace = true } + +[dev-dependencies] +asset-test-utils = { workspace = true, default-features = true } +parachains-runtimes-test-utils = { workspace = true, default-features = true } + 
+[build-dependencies] +substrate-wasm-builder = { optional = true, workspace = true, default-features = true } + +[features] +default = ["std"] +runtime-benchmarks = [ + "assets-common/runtime-benchmarks", + "cumulus-pallet-parachain-system/runtime-benchmarks", + "cumulus-pallet-session-benchmarking/runtime-benchmarks", + "cumulus-pallet-weight-reclaim/runtime-benchmarks", + "cumulus-pallet-xcmp-queue/runtime-benchmarks", + "cumulus-primitives-core/runtime-benchmarks", + "cumulus-primitives-utility/runtime-benchmarks", + "frame-benchmarking/runtime-benchmarks", + "frame-election-provider-support/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system-benchmarking/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-asset-conversion-ops/runtime-benchmarks", + "pallet-asset-conversion-tx-payment/runtime-benchmarks", + "pallet-asset-conversion/runtime-benchmarks", + "pallet-asset-rate/runtime-benchmarks", + "pallet-asset-rewards/runtime-benchmarks", + "pallet-assets-freezer/runtime-benchmarks", + "pallet-assets/runtime-benchmarks", + "pallet-bags-list/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-collator-selection/runtime-benchmarks", + "pallet-conviction-voting/runtime-benchmarks", + "pallet-delegated-staking/runtime-benchmarks", + "pallet-election-provider-multi-block/runtime-benchmarks", + "pallet-fast-unstake/runtime-benchmarks", + "pallet-message-queue/runtime-benchmarks", + "pallet-migrations/runtime-benchmarks", + "pallet-multisig/runtime-benchmarks", + "pallet-nft-fractionalization/runtime-benchmarks", + "pallet-nfts/runtime-benchmarks", + "pallet-nomination-pools/runtime-benchmarks", + "pallet-parameters/runtime-benchmarks", + "pallet-preimage/runtime-benchmarks", + "pallet-proxy/runtime-benchmarks", + "pallet-referenda/runtime-benchmarks", + "pallet-scheduler/runtime-benchmarks", + "pallet-staking-async-rc-client/runtime-benchmarks", + "pallet-staking-async/runtime-benchmarks", + 
"pallet-state-trie-migration/runtime-benchmarks", + "pallet-sudo/runtime-benchmarks", + "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", + "pallet-treasury/runtime-benchmarks", + "pallet-uniques/runtime-benchmarks", + "pallet-utility/runtime-benchmarks", + "pallet-vesting/runtime-benchmarks", + "pallet-whitelist/runtime-benchmarks", + "pallet-xcm-benchmarks/runtime-benchmarks", + "pallet-xcm-bridge-hub-router/runtime-benchmarks", + "pallet-xcm/runtime-benchmarks", + "parachains-common/runtime-benchmarks", + "polkadot-parachain-primitives/runtime-benchmarks", + "polkadot-runtime-common/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", + "sp-staking/runtime-benchmarks", + "xcm-builder/runtime-benchmarks", + "xcm-executor/runtime-benchmarks", + "xcm-runtime-apis/runtime-benchmarks", + "xcm/runtime-benchmarks", +] +try-runtime = [ + "cumulus-pallet-aura-ext/try-runtime", + "cumulus-pallet-parachain-system/try-runtime", + "cumulus-pallet-weight-reclaim/try-runtime", + "cumulus-pallet-xcm/try-runtime", + "cumulus-pallet-xcmp-queue/try-runtime", + "frame-election-provider-support/try-runtime", + "frame-executive/try-runtime", + "frame-support/try-runtime", + "frame-system/try-runtime", + "frame-try-runtime/try-runtime", + "pallet-asset-conversion-ops/try-runtime", + "pallet-asset-conversion-tx-payment/try-runtime", + "pallet-asset-conversion/try-runtime", + "pallet-asset-rate/try-runtime", + "pallet-asset-rewards/try-runtime", + "pallet-assets-freezer/try-runtime", + "pallet-assets/try-runtime", + "pallet-aura/try-runtime", + "pallet-authorship/try-runtime", + "pallet-bags-list/try-runtime", + "pallet-balances/try-runtime", + "pallet-collator-selection/try-runtime", + "pallet-conviction-voting/try-runtime", + "pallet-delegated-staking/try-runtime", + "pallet-election-provider-multi-block/try-runtime", + "pallet-fast-unstake/try-runtime", + "pallet-message-queue/try-runtime", + "pallet-migrations/try-runtime", + 
"pallet-multisig/try-runtime", + "pallet-nft-fractionalization/try-runtime", + "pallet-nfts/try-runtime", + "pallet-nomination-pools/try-runtime", + "pallet-parameters/try-runtime", + "pallet-preimage/try-runtime", + "pallet-proxy/try-runtime", + "pallet-referenda/try-runtime", + "pallet-scheduler/try-runtime", + "pallet-session/try-runtime", + "pallet-staking-async-rc-client/try-runtime", + "pallet-staking-async/try-runtime", + "pallet-state-trie-migration/try-runtime", + "pallet-sudo/try-runtime", + "pallet-timestamp/try-runtime", + "pallet-transaction-payment/try-runtime", + "pallet-treasury/try-runtime", + "pallet-uniques/try-runtime", + "pallet-utility/try-runtime", + "pallet-vesting/try-runtime", + "pallet-whitelist/try-runtime", + "pallet-xcm-bridge-hub-router/try-runtime", + "pallet-xcm/try-runtime", + "parachain-info/try-runtime", + "polkadot-runtime-common/try-runtime", + "sp-runtime/try-runtime", +] +std = [ + "assets-common/std", + "bp-asset-hub-rococo/std", + # "bp-asset-hub-next-westend/std", + "bp-bridge-hub-rococo/std", + "bp-bridge-hub-westend/std", + "codec/std", + "cumulus-pallet-aura-ext/std", + "cumulus-pallet-parachain-system/std", + "cumulus-pallet-session-benchmarking/std", + "cumulus-pallet-weight-reclaim/std", + "cumulus-pallet-xcm/std", + "cumulus-pallet-xcmp-queue/std", + "cumulus-primitives-aura/std", + "cumulus-primitives-core/std", + "cumulus-primitives-utility/std", + "frame-benchmarking?/std", + "frame-election-provider-support/std", + "frame-executive/std", + "frame-metadata-hash-extension/std", + "frame-support/std", + "frame-system-benchmarking?/std", + "frame-system-rpc-runtime-api/std", + "frame-system/std", + "frame-try-runtime?/std", + "log/std", + "pallet-asset-conversion-ops/std", + "pallet-asset-conversion-tx-payment/std", + "pallet-asset-conversion/std", + "pallet-asset-rate/std", + "pallet-asset-rewards/std", + "pallet-assets-freezer/std", + "pallet-assets/std", + "pallet-aura/std", + "pallet-authorship/std", + 
"pallet-bags-list/std", + "pallet-balances/std", + "pallet-collator-selection/std", + "pallet-conviction-voting/std", + "pallet-delegated-staking/std", + "pallet-election-provider-multi-block/std", + "pallet-fast-unstake/std", + "pallet-message-queue/std", + "pallet-migrations/std", + "pallet-multisig/std", + "pallet-nft-fractionalization/std", + "pallet-nfts-runtime-api/std", + "pallet-nfts/std", + "pallet-nomination-pools-runtime-api/std", + "pallet-nomination-pools/std", + "pallet-parameters/std", + "pallet-preimage/std", + "pallet-proxy/std", + "pallet-referenda/std", + "pallet-scheduler/std", + "pallet-session/std", + "pallet-staking-async-rc-client/std", + "pallet-staking-async-runtime-api/std", + "pallet-staking-async/std", + "pallet-state-trie-migration/std", + "pallet-sudo/std", + "pallet-timestamp/std", + "pallet-transaction-payment-rpc-runtime-api/std", + "pallet-transaction-payment/std", + "pallet-treasury/std", + "pallet-uniques/std", + "pallet-utility/std", + "pallet-vesting/std", + "pallet-whitelist/std", + "pallet-xcm-benchmarks?/std", + "pallet-xcm-bridge-hub-router/std", + "pallet-xcm/std", + "parachain-info/std", + "parachains-common/std", + "polkadot-parachain-primitives/std", + "polkadot-runtime-common/std", + "primitive-types/std", + "scale-info/std", + "serde_json/std", + "sp-api/std", + "sp-arithmetic/std", + "sp-block-builder/std", + "sp-consensus-aura/std", + "sp-core/std", + "sp-genesis-builder/std", + "sp-inherents/std", + "sp-keyring/std", + "sp-npos-elections/std", + "sp-offchain/std", + "sp-runtime/std", + "sp-session/std", + "sp-staking/std", + "sp-std/std", + "sp-storage/std", + "sp-transaction-pool/std", + "sp-version/std", + "substrate-wasm-builder", + "testnet-parachains-constants/std", + "westend-runtime-constants/std", + "xcm-builder/std", + "xcm-executor/std", + "xcm-runtime-apis/std", + "xcm/std", +] + +fast-runtime = [] + +# Enable the metadata hash generation in the wasm builder. 
+metadata-hash = ["substrate-wasm-builder/metadata-hash"] + +# A feature that should be enabled when the runtime should be built for on-chain +# deployment. This will disable stuff that shouldn't be part of the on-chain wasm +# to make it smaller, like logging for example. +on-chain-release-build = ["metadata-hash"] diff --git a/substrate/frame/staking-async/runtimes/parachain/README.md b/substrate/frame/staking-async/runtimes/parachain/README.md new file mode 100644 index 0000000000000..e23a9538a15f4 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/README.md @@ -0,0 +1,63 @@ +# Staking Async Parachain + +## Overview + +This parachain runtime is a fake fork of the asset-hub next (originally created by Donál). It is here +to test the async-staking pallet in a real environment. + +This parachain contains: + +- `pallet-staking-async` +- `pallet-staking-async-rc-client` +- `pallet-election-provider-multi-block` and family +- aux staking pallets `pallet-nomination-pools`, `pallet-fast-unstake`, `pallet-bags-list`, and + `pallet-delegated-staking`. + +All of the above are means to stake and select validators for the RELAY-CHAIN, which is eventually +communicated to it via the `pallet-staking-async-rc-client` pallet. + +A lot more is in the runtime, and can be eventually removed. + +Note that the parachain runtime also contains a `pallet-session` that works with +`pallet-collator-selection` for the PARACHAIN block author selection. + +The counterpart `rc` runtime is a relay chain that is meant to host the parachain. It contains: + +- `pallet-staking-async-ah-client` +- `pallet-session` +- `pallet-authorship` +- And all of the consensus pallets that feed the authority set from the session, such as + aura/babe/grandpa/beefy and so on. + +## Run + +To run this, a one-click script is provided: + +``` +bash build-and-run-zn.sh +``` + +This script will generate chain-specs for both runtimes, and run them with zombie-net.
+ +> Make sure you have all Polkadot binaries (`polkadot`, `polkadot-execution-worker` and +> `polkadot-prepare-worker`) and `polkadot-parachain` installed in your PATH. You can usually +> download them from the Polkadot-sdk release page. + +You also need `chain-spec-builder`, but the script builds that and uses a fresh one. + +## Chain-spec presets + +We have tried to move as much of the configuration as possible to different chain-specifications, so +that manually tweaking the code is not needed. + +The parachain comes with 3 main chain-spec presets. + +- `development`: 100 validator, 2000 nominators, all 2000 nominators in the snapshot, 10 validator + to be elected, 4 pages +- `dot_size`: 2000 validator, 25_000 nominators, 22_500 nominators in the snapshot, 500 validator to + be elected, 32 pages +- `ksm_size`: 4000 validator, 20_000 nominators, 12_500 nominators in the snapshot, 1000 validator + to be elected, 16 pages + +Both when running the benchmarks (`bench.sh`) and the chain (`build-and-run-zn.sh`), you can specify +the chain-spec preset. See each file for more info as to how. diff --git a/substrate/frame/staking-async/runtimes/parachain/bench.sh b/substrate/frame/staking-async/runtimes/parachain/bench.sh new file mode 100755 index 0000000000000..9599622e9864f --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/bench.sh @@ -0,0 +1,31 @@ +source ~/.zshrc + +STEPS=2 +REPEAT=1 + +# if any of the command line arguments are equal to `--log=X`, set X to the below log levels +LOG="runtime::multiblock-election=debug,runtime::staking-async=debug,polkadot_sdk_frame::benchmark=debug" + +if [ "$3" != "no-compile" ]; then + FORCE_WASM_BUILD=$RANDOM WASMTIME_BACKTRACE_DETAILS=1 RUST_LOG=${LOG} cargo build --release -p pallet-staking-async-parachain-runtime --features runtime-benchmarks +else + echo "Skipping compilation because 'no-compile' argument was provided." 
+fi + +WASM_BLOB_PATH=../../../../../target/release/wbuild/pallet-staking-async-parachain-runtime/pallet_staking_async_parachain_runtime.compact.wasm + +echo "WASM_BLOB_PATH: $WASM_BLOB_PATH" +echo "Last modified date of WASM_BLOB:" +stat -f "%Sm" $WASM_BLOB_PATH + +WASMTIME_BACKTRACE_DETAILS=1 RUST_LOG=${LOG} \ + frame-omni-bencher v1 benchmark pallet \ + --pallet "$1" \ + --extrinsic "all" \ + --runtime $WASM_BLOB_PATH \ + --steps $STEPS \ + --repeat $REPEAT \ + --genesis-builder-preset $2 \ + --template ../../../../../substrate/.maintain/frame-weight-template.hbs \ + --heap-pages 65000 \ + --output ./$1_$2.rs \ diff --git a/substrate/frame/staking-async/runtimes/parachain/bench_all.sh b/substrate/frame/staking-async/runtimes/parachain/bench_all.sh new file mode 100755 index 0000000000000..e6a440179eb83 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/bench_all.sh @@ -0,0 +1,53 @@ +source ~/.zshrc + +STEPS=10 +REPEAT=20 + +# if any of the command line arguments are equal to `--log=X`, set X to the below log levels +LOG="runtime::multiblock-election=debug,runtime::staking-async=debug,polkadot_sdk_frame::benchmark=debug" + +if [ "$1" != "no-compile" ]; then + cargo build --release -p frame-omni-bencher + FORCE_WASM_BUILD=$RANDOM WASMTIME_BACKTRACE_DETAILS=1 RUST_LOG=${LOG} cargo build --release -p pallet-staking-async-parachain-runtime --features runtime-benchmarks +else + echo "Skipping compilation because 'no-compile' argument was provided." +fi + +WASM_BLOB_PATH=../../../../../target/release/wbuild/pallet-staking-async-parachain-runtime/pallet_staking_async_parachain_runtime.compact.wasm + +echo "WASM_BLOB_PATH: $WASM_BLOB_PATH" +echo "Last modified date of WASM_BLOB:" +stat -f "%Sm" $WASM_BLOB_PATH + +run_benchmark() { + local pallet_name="$1" + local genesis_preset="$2" + local output_file="./${pallet_name}_${genesis_preset}.rs" + + echo "Running benchmark for pallet '$pallet_name' with preset '$genesis_preset'..." 
+ echo "Outputting to '$output_file'" + + WASMTIME_BACKTRACE_DETAILS=1 RUST_LOG=${LOG} \ + ../../../../../target/release/frame-omni-bencher v1 benchmark pallet \ + --pallet "$pallet_name" \ + --extrinsic "all" \ + --runtime "$WASM_BLOB_PATH" \ + --steps "$STEPS" \ + --repeat "$REPEAT" \ + --genesis-builder-preset "$genesis_preset" \ + --template "../../../../../substrate/.maintain/frame-weight-template.hbs" \ + --heap-pages 65000 \ + --output "$output_file" +} + +run_benchmark "pallet_staking_async" "dot_size" +run_benchmark "pallet_election_provider_multi_block" "dot_size" +run_benchmark "pallet_election_provider_multi_block_signed" "dot_size" +run_benchmark "pallet_election_provider_multi_block_unsigned" "dot_size" +run_benchmark "pallet_election_provider_multi_block_verifier" "dot_size" + +run_benchmark "pallet_staking_async" "ksm_size" +run_benchmark "pallet_election_provider_multi_block" "ksm_size" +run_benchmark "pallet_election_provider_multi_block_signed" "ksm_size" +run_benchmark "pallet_election_provider_multi_block_unsigned" "ksm_size" +run_benchmark "pallet_election_provider_multi_block_verifier" "ksm_size" diff --git a/substrate/frame/staking-async/runtimes/parachain/build-and-run-zn.sh b/substrate/frame/staking-async/runtimes/parachain/build-and-run-zn.sh new file mode 100755 index 0000000000000..721c672662215 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/build-and-run-zn.sh @@ -0,0 +1,37 @@ +echo "✅ building chain-spec-builder and pallet-staking-async-rc-runtime and pallet-staking-async-parachain-runtime" + +LOG="runtime::multiblock-election=info,runtime::staking=info" + +if [ "$1" != "no-compile" ]; then + RUST_LOG=${LOG} cargo build --release -p pallet-staking-async-rc-runtime -p pallet-staking-async-parachain-runtime -p staging-chain-spec-builder +else + echo "Skipping compilation because 'no-compile' argument was provided." 
+fi + +echo "✅ removing any old chain-spec file" +rm ./parachain.json +rm ./rc.json + +echo "✅ creating parachain chain specs" +RUST_LOG=${LOG} ../../../../../target/release/chain-spec-builder \ + create \ + -t development \ + --runtime ../../../../../target/release/wbuild/pallet-staking-async-parachain-runtime/pallet_staking_async_parachain_runtime.compact.compressed.wasm \ + --relay-chain rococo-local \ + --para-id 1100 \ + named-preset dot_size + # named-preset ksm_size + # named-preset development + # change this as per your needs ^^^ +mv ./chain_spec.json ./parachain.json + +echo "✅ creating rc chain specs" +RUST_LOG=${LOG} ../../../../../target/release/chain-spec-builder \ + create \ + -t development \ + --runtime ../../../../../target/release/wbuild/pallet-staking-async-rc-runtime/fast_runtime_binary.rs.wasm \ + named-preset local_testnet +mv ./chain_spec.json ./rc.json + +echo "✅ launching ZN" +zombienet --provider native -l text spawn zombienet-staking-runtimes.toml diff --git a/substrate/frame/staking-async/runtimes/parachain/build.rs b/substrate/frame/staking-async/runtimes/parachain/build.rs new file mode 100644 index 0000000000000..cf9664aeb2f3e --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/build.rs @@ -0,0 +1,29 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#[cfg(all(not(feature = "metadata-hash"), feature = "std"))] +fn main() { + substrate_wasm_builder::WasmBuilder::build_using_defaults(); +} + +#[cfg(all(feature = "metadata-hash", feature = "std"))] +fn main() { + substrate_wasm_builder::WasmBuilder::init_with_defaults() + .enable_metadata_hash("WND", 12) + .build(); +} + +#[cfg(not(feature = "std"))] +fn main() {} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/bag_thresholds.rs b/substrate/frame/staking-async/runtimes/parachain/src/bag_thresholds.rs new file mode 100644 index 0000000000000..cf010da7a185a --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/bag_thresholds.rs @@ -0,0 +1,235 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated voter bag thresholds. +//! +//! Generated on 2021-07-05T14:35:50.538338181+00:00 +//! for the westend runtime. + +/// Existential weight for this runtime. +#[cfg(any(test, feature = "std"))] +#[allow(unused)] +pub const EXISTENTIAL_WEIGHT: u64 = 10_000_000_000; + +/// Constant ratio between bags for this runtime. +#[cfg(any(test, feature = "std"))] +#[allow(unused)] +pub const CONSTANT_RATIO: f64 = 1.1131723507077667; + +/// Upper thresholds delimiting the bag list. 
+pub const THRESHOLDS: [u64; 200] = [ + 10_000_000_000, + 11_131_723_507, + 12_391_526_824, + 13_793_905_044, + 15_354_993_703, + 17_092_754_435, + 19_027_181_634, + 21_180_532_507, + 23_577_583_160, + 26_245_913_670, + 29_216_225_417, + 32_522_694_326, + 36_203_364_094, + 40_300_583_912, + 44_861_495_728, + 49_938_576_656, + 55_590_242_767, + 61_881_521_217, + 68_884_798_439, + 76_680_653_006, + 85_358_782_760, + 95_019_036_859, + 105_772_564_622, + 117_743_094_401, + 131_068_357_174, + 145_901_671_259, + 162_413_706_368, + 180_794_447_305, + 201_255_379_901, + 224_031_924_337, + 249_386_143_848, + 277_609_759_981, + 309_027_509_097, + 344_000_878_735, + 382_932_266_827, + 426_269_611_626, + 474_511_545_609, + 528_213_132_664, + 587_992_254_562, + 654_536_720_209, + 728_612_179_460, + 811_070_932_564, + 902_861_736_593, + 1_005_040_721_687, + 1_118_783_542_717, + 1_245_398_906_179, + 1_386_343_627_960, + 1_543_239_395_225, + 1_717_891_425_287, + 1_912_309_236_147, + 2_128_729_767_682, + 2_369_643_119_512, + 2_637_821_201_686, + 2_936_349_627_828, + 3_268_663_217_709, + 3_638_585_517_729, + 4_050_372_794_022, + 4_508_763_004_364, + 5_019_030_312_352, + 5_587_045_771_074, + 6_219_344_874_498, + 6_923_202_753_807, + 7_706_717_883_882, + 8_578_905_263_043, + 9_549_800_138_161, + 10_630_573_468_586, + 11_833_660_457_397, + 13_172_903_628_838, + 14_663_712_098_160, + 16_323_238_866_411, + 18_170_578_180_087, + 20_226_985_226_447, + 22_516_120_692_255, + 25_064_322_999_817, + 27_900_911_352_605, + 31_058_523_077_268, + 34_573_489_143_434, + 38_486_252_181_966, + 42_841_831_811_331, + 47_690_342_626_046, + 53_087_570_807_094, + 59_095_615_988_698, + 65_783_605_766_662, + 73_228_491_069_308, + 81_515_931_542_404, + 90_741_281_135_191, + 101_010_685_227_495, + 112_442_301_921_293, + 125_167_661_548_718, + 139_333_180_038_781, + 155_101_843_555_358, + 172_655_083_789_626, + 192_194_865_483_744, + 213_946_010_204_502, + 238_158_783_103_893, + 265_111_772_429_462, + 
295_115_094_915_607, + 328_513_963_936_552, + 365_692_661_475_578, + 407_078_959_611_349, + 453_149_042_394_237, + 504_432_984_742_966, + 561_520_851_400_862, + 625_069_486_125_324, + 695_810_069_225_823, + 774_556_530_406_243, + 862_214_913_708_369, + 959_793_802_308_039, + 1_068_415_923_109_985, + 1_189_331_064_661_951, + 1_323_930_457_019_515, + 1_473_762_779_014_021, + 1_640_551_977_100_649, + 1_826_217_100_807_404, + 2_032_894_383_008_501, + 2_262_961_819_074_188, + 2_519_066_527_700_738, + 2_804_155_208_229_882, + 3_121_508_044_894_685, + 3_474_776_448_088_622, + 3_868_025_066_902_796, + 4_305_778_556_320_752, + 4_793_073_637_166_665, + 5_335_517_047_800_242, + 5_939_350_054_341_159, + 6_611_520_261_667_250, + 7_359_761_551_432_161, + 8_192_683_066_856_378, + 9_119_868_268_136_230, + 10_151_985_198_186_376, + 11_300_909_227_415_580, + 12_579_859_689_817_292, + 14_003_551_982_487_792, + 15_588_366_878_604_342, + 17_352_539_001_951_086, + 19_316_366_631_550_092, + 21_502_445_250_375_680, + 23_935_927_525_325_748, + 26_644_812_709_737_600, + 29_660_268_798_266_784, + 33_016_991_140_790_860, + 36_753_601_641_491_664, + 40_913_093_136_236_104, + 45_543_324_061_189_736, + 50_697_569_104_240_168, + 56_435_132_174_936_472, + 62_822_028_745_677_552, + 69_931_745_415_056_768, + 77_846_085_432_775_824, + 86_656_109_914_600_688, + 96_463_185_576_826_656, + 107_380_151_045_315_664, + 119_532_615_158_469_088, + 133_060_402_202_199_856, + 148_119_160_705_543_712, + 164_882_154_307_451_552, + 183_542_255_300_186_560, + 204_314_163_786_713_728, + 227_436_877_985_347_776, + 253_176_444_104_585_088, + 281_829_017_427_734_464, + 313_724_269_827_691_328, + 349_229_182_918_168_832, + 388_752_270_484_770_624, + 432_748_278_778_513_664, + 481_723_418_752_617_984, + 536_241_190_443_833_600, + 596_928_866_512_693_376, + 664_484_709_541_257_600, + 739_686_006_129_409_280, + 823_398_010_228_713_984, + 916_583_898_614_395_264, + 1_020_315_853_041_475_584, + 1_135_787_396_594_579_584, + 
1_264_327_126_171_442_688, + 1_407_413_999_103_859_968, + 1_566_694_349_801_462_272, + 1_744_000_832_209_069_824, + 1_941_373_506_026_471_680, + 2_161_083_309_305_266_176, + 2_405_658_187_494_662_656, + 2_677_912_179_572_818_944, + 2_980_977_795_924_034_048, + 3_318_342_060_496_414_208, + 3_693_886_631_935_247_360, + 4_111_932_465_319_354_368, + 4_577_289_528_371_127_808, + 5_095_312_144_166_932_480, + 5_671_960_597_112_134_656, + 6_313_869_711_009_142_784, + 7_028_425_188_266_614_784, + 7_823_848_588_596_424_704, + 8_709_291_924_949_524_480, + 9_694_942_965_096_232_960, + 10_792_142_450_433_898_496, + 12_013_514_580_722_579_456, + 13_373_112_266_084_982_784, + 14_886_578_817_516_689_408, + 16_571_327_936_291_497_984, + 18_446_744_073_709_551_615, +]; diff --git a/substrate/frame/staking-async/runtimes/parachain/src/genesis_config_presets.rs b/substrate/frame/staking-async/runtimes/parachain/src/genesis_config_presets.rs new file mode 100644 index 0000000000000..5e2411011cac8 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/genesis_config_presets.rs @@ -0,0 +1,134 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
# Staking Async Runtime genesis config presets + +use crate::*; +use alloc::{vec, vec::Vec}; +use cumulus_primitives_core::ParaId; +use frame_support::build_struct_json_patch; +use parachains_common::{AccountId, AuraId}; +use sp_genesis_builder::PresetId; +use sp_keyring::Sr25519Keyring; +use testnet_parachains_constants::westend::{ + currency::UNITS as WND, xcm_version::SAFE_XCM_VERSION, +}; + +const STAKING_ASYNC_PARA_ED: Balance = ExistentialDeposit::get(); + +struct GenesisParams { + invulnerables: Vec<(AccountId, AuraId)>, + endowed_accounts: Vec, + endowment: Balance, + dev_stakers: Option<(u32, u32)>, + pages: u32, + max_electing_voters: u32, + validator_count: u32, + root: AccountId, + id: ParaId, +} + +fn staking_async_parachain_genesis(params: GenesisParams) -> serde_json::Value { + let GenesisParams { + invulnerables, + endowed_accounts, + endowment, + dev_stakers, + validator_count, + root, + // TODO: find a way to set these here, but for now we will set them directly in the runtime. + pages: _pages, + max_electing_voters: _max_electing_voters, + id, + } = params; + build_struct_json_patch!(RuntimeGenesisConfig { + balances: BalancesConfig { + balances: endowed_accounts.iter().cloned().map(|k| (k, endowment)).collect(), + }, + parachain_info: ParachainInfoConfig { parachain_id: id }, + collator_selection: CollatorSelectionConfig { + invulnerables: invulnerables.iter().cloned().map(|(acc, _)| acc).collect(), + candidacy_bond: STAKING_ASYNC_PARA_ED * 16, + }, + session: SessionConfig { + keys: invulnerables + .into_iter() + .map(|(acc, aura)| { + ( + acc.clone(), // account id + acc, // validator id + SessionKeys { aura }, // session keys + ) + }) + .collect(), + }, + polkadot_xcm: PolkadotXcmConfig { safe_xcm_version: Some(SAFE_XCM_VERSION) }, + sudo: SudoConfig { key: Some(root) }, + staking: StakingConfig { validator_count, dev_stakers, ..Default::default() } + }) +} + +/// Provides the JSON representation of predefined genesis config for given `id`. 
+pub fn get_preset(id: &PresetId) -> Option<Vec<u8>> {
+pub fn preset_names() -> Vec<PresetId> {
+ +use super::*; +use crate::xcm_config::Collectives; +use frame_support::{ + parameter_types, + traits::{ + fungible::HoldConsideration, tokens::UnityOrOuterConversion, EitherOf, EitherOfDiverse, + FromContains, LinearStoragePrice, + }, +}; +use frame_system::EnsureRootWithSuccess; +use pallet_xcm::{EnsureXcm, IsVoiceOfBody}; +use polkadot_runtime_common::impls::{ + ContainsParts, LocatableAssetConverter, VersionedLocatableAsset, VersionedLocationConverter, +}; +use sp_runtime::{traits::IdentityLookup, Percent}; +use xcm::latest::{ + prelude::{InteriorLocation, PalletInstance}, + BodyId, +}; + +mod origins; +pub use origins::{ + pallet_custom_origins, AuctionAdmin, FellowshipAdmin, GeneralAdmin, LeaseAdmin, + ReferendumCanceller, ReferendumKiller, Spender, StakingAdmin, Treasurer, WhitelistedCaller, +}; +mod tracks; +pub use tracks::TracksInfo; +use xcm_builder::PayOverXcm; + +parameter_types! { + pub const VoteLockingPeriod: BlockNumber = 7 * DAYS; +} + +impl pallet_conviction_voting::Config for Runtime { + type WeightInfo = weights::pallet_conviction_voting::WeightInfo; + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type VoteLockingPeriod = VoteLockingPeriod; + type MaxVotes = ConstU32<512>; + type MaxTurnout = + frame_support::traits::tokens::currency::ActiveIssuanceOf; + type Polls = Referenda; + type BlockNumberProvider = RelayChainBlockNumberProvider; + type VotingHooks = (); +} + +parameter_types! { + pub const AlarmInterval: BlockNumber = 1; + pub const SubmissionDeposit: Balance = 1 * 3 * CENTS; + pub const UndecidingTimeout: BlockNumber = 14 * DAYS; +} + +impl origins::pallet_custom_origins::Config for Runtime {} + +parameter_types! { + // Fellows pluralistic body. 
+ pub const FellowsBodyId: BodyId = BodyId::Technical; +} + +impl pallet_whitelist::Config for Runtime { + type WeightInfo = weights::pallet_whitelist::WeightInfo; + type RuntimeCall = RuntimeCall; + type RuntimeEvent = RuntimeEvent; + type WhitelistOrigin = EitherOfDiverse< + EnsureRoot, + EnsureXcm>, + >; + type DispatchWhitelistedOrigin = EitherOf, WhitelistedCaller>; + type Preimages = Preimage; +} + +impl pallet_referenda::Config for Runtime { + type WeightInfo = weights::pallet_referenda::WeightInfo; + type RuntimeCall = RuntimeCall; + type RuntimeEvent = RuntimeEvent; + type Scheduler = Scheduler; + type Currency = Balances; + type SubmitOrigin = frame_system::EnsureSigned; + type CancelOrigin = EitherOf, ReferendumCanceller>; + type KillOrigin = EitherOf, ReferendumKiller>; + type Slash = Treasury; + type Votes = pallet_conviction_voting::VotesOf; + type Tally = pallet_conviction_voting::TallyOf; + type SubmissionDeposit = SubmissionDeposit; + type MaxQueued = ConstU32<100>; + type UndecidingTimeout = UndecidingTimeout; + type AlarmInterval = AlarmInterval; + type Tracks = TracksInfo; + type Preimages = Preimage; + type BlockNumberProvider = RelayChainBlockNumberProvider; +} + +parameter_types! { + pub const SpendPeriod: BlockNumber = 6 * DAYS; + pub const Burn: Permill = Permill::from_perthousand(2); + pub const TreasuryPalletId: PalletId = PalletId(*b"py/trsry"); + pub const PayoutSpendPeriod: BlockNumber = 30 * DAYS; + // The asset's interior location for the paying account. This is the Treasury + // pallet instance (which sits at index 37). 
+ pub TreasuryInteriorLocation: InteriorLocation = PalletInstance(37).into(); + + pub const TipCountdown: BlockNumber = 1 * DAYS; + pub const TipFindersFee: Percent = Percent::from_percent(20); + pub const TipReportDepositBase: Balance = 100 * CENTS; + pub const DataDepositPerByte: Balance = 1 * CENTS; + pub const MaxApprovals: u32 = 100; + pub const MaxAuthorities: u32 = 100_000; + pub const MaxKeys: u32 = 10_000; + pub const MaxPeerInHeartbeats: u32 = 10_000; + pub const MaxBalance: Balance = Balance::max_value(); +} + +pub type TreasurySpender = EitherOf, Spender>; + +impl pallet_treasury::Config for Runtime { + type PalletId = TreasuryPalletId; + type Currency = Balances; + type RejectOrigin = EitherOfDiverse, Treasurer>; + type RuntimeEvent = RuntimeEvent; + type SpendPeriod = SpendPeriod; + type Burn = Burn; + type BurnDestination = (); + type MaxApprovals = MaxApprovals; + type WeightInfo = weights::pallet_treasury::WeightInfo; + type SpendFunds = (); + type SpendOrigin = TreasurySpender; + type AssetKind = VersionedLocatableAsset; + type Beneficiary = VersionedLocation; + type BeneficiaryLookup = IdentityLookup; + type Paymaster = PayOverXcm< + TreasuryInteriorLocation, + crate::xcm_config::XcmRouter, + crate::PolkadotXcm, + ConstU32<{ 6 * HOURS }>, + Self::Beneficiary, + Self::AssetKind, + LocatableAssetConverter, + VersionedLocationConverter, + >; + type BalanceConverter = UnityOrOuterConversion< + ContainsParts< + FromContains< + xcm_builder::IsChildSystemParachain, + xcm_builder::IsParentsOnly>, + >, + >, + AssetRate, + >; + type PayoutPeriod = PayoutSpendPeriod; + type BlockNumberProvider = RelayChainBlockNumberProvider; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = polkadot_runtime_common::impls::benchmarks::TreasuryArguments; +} +impl pallet_asset_rate::Config for Runtime { + type WeightInfo = weights::pallet_asset_rate::WeightInfo; + type RuntimeEvent = RuntimeEvent; + type CreateOrigin = EnsureRoot; + type RemoveOrigin = 
EnsureRoot; + type UpdateOrigin = EnsureRoot; + type Currency = Balances; + type AssetKind = ::AssetKind; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = polkadot_runtime_common::impls::benchmarks::AssetRateArguments; +} + +parameter_types! { + pub MaximumSchedulerWeight: frame_support::weights::Weight = Perbill::from_percent(80) * + RuntimeBlockWeights::get().max_block; + pub const MaxScheduledPerBlock: u32 = 50; + pub const NoPreimagePostponement: Option = Some(10); +} + +impl pallet_scheduler::Config for Runtime { + type RuntimeOrigin = RuntimeOrigin; + type RuntimeEvent = RuntimeEvent; + type PalletsOrigin = OriginCaller; + type RuntimeCall = RuntimeCall; + type MaximumWeight = MaximumSchedulerWeight; + type ScheduleOrigin = EnsureRoot; + type MaxScheduledPerBlock = MaxScheduledPerBlock; + type WeightInfo = weights::pallet_scheduler::WeightInfo; + type OriginPrivilegeCmp = frame_support::traits::EqualPrivilegeOnly; + type Preimages = Preimage; + type BlockNumberProvider = RelayChainBlockNumberProvider; +} + +parameter_types! { + pub const PreimageBaseDeposit: Balance = deposit(2, 64); + pub const PreimageByteDeposit: Balance = deposit(0, 1); + pub const PreimageHoldReason: RuntimeHoldReason = RuntimeHoldReason::Preimage(pallet_preimage::HoldReason::Preimage); +} + +impl pallet_preimage::Config for Runtime { + type WeightInfo = weights::pallet_preimage::WeightInfo; + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type ManagerOrigin = EnsureRoot; + type Consideration = HoldConsideration< + AccountId, + Balances, + PreimageHoldReason, + LinearStoragePrice, + >; +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/governance/origins.rs b/substrate/frame/staking-async/runtimes/parachain/src/governance/origins.rs new file mode 100644 index 0000000000000..da3a12449e3fa --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/governance/origins.rs @@ -0,0 +1,205 @@ +// This file is part of Substrate. 
+ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Custom origins for governance interventions. + +pub use pallet_custom_origins::*; + +#[frame_support::pallet] +pub mod pallet_custom_origins { + use crate::{Balance, CENTS, GRAND}; + use frame_support::pallet_prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + #[derive( + PartialEq, + Eq, + Clone, + MaxEncodedLen, + Encode, + Decode, + DecodeWithMemTracking, + TypeInfo, + RuntimeDebug, + )] + #[pallet::origin] + pub enum Origin { + /// Origin for cancelling slashes. + StakingAdmin, + /// Origin for spending (any amount of) funds. + Treasurer, + /// Origin for managing the composition of the fellowship. + FellowshipAdmin, + /// Origin for managing the registrar. + GeneralAdmin, + /// Origin for starting auctions. + AuctionAdmin, + /// Origin able to force slot leases. + LeaseAdmin, + /// Origin able to cancel referenda. + ReferendumCanceller, + /// Origin able to kill referenda. + ReferendumKiller, + /// Origin able to spend up to 1 KSM from the treasury at once. + SmallTipper, + /// Origin able to spend up to 5 KSM from the treasury at once. + BigTipper, + /// Origin able to spend up to 50 KSM from the treasury at once. + SmallSpender, + /// Origin able to spend up to 500 KSM from the treasury at once. 
+ MediumSpender, + /// Origin able to spend up to 5,000 KSM from the treasury at once. + BigSpender, + /// Origin able to dispatch a whitelisted call. + WhitelistedCaller, + /// Origin commanded by any members of the Polkadot Fellowship (no Dan grade needed). + FellowshipInitiates, + /// Origin commanded by Polkadot Fellows (3rd Dan fellows or greater). + Fellows, + /// Origin commanded by Polkadot Experts (5th Dan fellows or greater). + FellowshipExperts, + /// Origin commanded by Polkadot Masters (7th Dan fellows of greater). + FellowshipMasters, + /// Origin commanded by rank 1 of the Polkadot Fellowship and with a success of 1. + Fellowship1Dan, + /// Origin commanded by rank 2 of the Polkadot Fellowship and with a success of 2. + Fellowship2Dan, + /// Origin commanded by rank 3 of the Polkadot Fellowship and with a success of 3. + Fellowship3Dan, + /// Origin commanded by rank 4 of the Polkadot Fellowship and with a success of 4. + Fellowship4Dan, + /// Origin commanded by rank 5 of the Polkadot Fellowship and with a success of 5. + Fellowship5Dan, + /// Origin commanded by rank 6 of the Polkadot Fellowship and with a success of 6. + Fellowship6Dan, + /// Origin commanded by rank 7 of the Polkadot Fellowship and with a success of 7. + Fellowship7Dan, + /// Origin commanded by rank 8 of the Polkadot Fellowship and with a success of 8. + Fellowship8Dan, + /// Origin commanded by rank 9 of the Polkadot Fellowship and with a success of 9. + Fellowship9Dan, + } + + macro_rules! decl_unit_ensures { + ( $name:ident: $success_type:ty = $success:expr ) => { + pub struct $name; + impl> + From> + EnsureOrigin for $name + { + type Success = $success_type; + fn try_origin(o: O) -> Result { + o.into().and_then(|o| match o { + Origin::$name => Ok($success), + r => Err(O::from(r)), + }) + } + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin() -> Result { + Ok(O::from(Origin::$name)) + } + } + }; + ( $name:ident ) => { decl_unit_ensures! 
{ $name : () = () } }; + ( $name:ident: $success_type:ty = $success:expr, $( $rest:tt )* ) => { + decl_unit_ensures! { $name: $success_type = $success } + decl_unit_ensures! { $( $rest )* } + }; + ( $name:ident, $( $rest:tt )* ) => { + decl_unit_ensures! { $name } + decl_unit_ensures! { $( $rest )* } + }; + () => {} + } + decl_unit_ensures!( + StakingAdmin, + Treasurer, + FellowshipAdmin, + GeneralAdmin, + AuctionAdmin, + LeaseAdmin, + ReferendumCanceller, + ReferendumKiller, + WhitelistedCaller, + FellowshipInitiates: u16 = 0, + Fellows: u16 = 3, + FellowshipExperts: u16 = 5, + FellowshipMasters: u16 = 7, + ); + + macro_rules! decl_ensure { + ( + $vis:vis type $name:ident: EnsureOrigin { + $( $item:ident = $success:expr, )* + } + ) => { + $vis struct $name; + impl> + From> + EnsureOrigin for $name + { + type Success = $success_type; + fn try_origin(o: O) -> Result { + o.into().and_then(|o| match o { + $( + Origin::$item => Ok($success), + )* + r => Err(O::from(r)), + }) + } + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin() -> Result { + // By convention the more privileged origins go later, so for greatest chance + // of success, we want the last one. + let _result: Result = Err(()); + $( + let _result: Result = Ok(O::from(Origin::$item)); + )* + _result + } + } + } + } + + decl_ensure! { + pub type Spender: EnsureOrigin { + SmallTipper = 250 * 3 * CENTS, + BigTipper = 1 * GRAND, + SmallSpender = 10 * GRAND, + MediumSpender = 100 * GRAND, + BigSpender = 1_000 * GRAND, + Treasurer = 10_000 * GRAND, + } + } + + decl_ensure! 
{ + pub type EnsureFellowship: EnsureOrigin { + Fellowship1Dan = 1, + Fellowship2Dan = 2, + Fellowship3Dan = 3, + Fellowship4Dan = 4, + Fellowship5Dan = 5, + Fellowship6Dan = 6, + Fellowship7Dan = 7, + Fellowship8Dan = 8, + Fellowship9Dan = 9, + } + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/governance/tracks.rs b/substrate/frame/staking-async/runtimes/parachain/src/governance/tracks.rs new file mode 100644 index 0000000000000..8801448071a78 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/governance/tracks.rs @@ -0,0 +1,326 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Track configurations for governance. 
+ +use super::*; + +use alloc::borrow::Cow; +use sp_runtime::str_array as s; + +const fn percent(x: i32) -> sp_arithmetic::FixedI64 { + sp_arithmetic::FixedI64::from_rational(x as u128, 100) +} +use pallet_referenda::Curve; +const APP_ROOT: Curve = Curve::make_reciprocal(4, 28, percent(80), percent(50), percent(100)); +const SUP_ROOT: Curve = Curve::make_linear(28, 28, percent(0), percent(50)); +const APP_STAKING_ADMIN: Curve = Curve::make_linear(17, 28, percent(50), percent(100)); +const SUP_STAKING_ADMIN: Curve = + Curve::make_reciprocal(12, 28, percent(1), percent(0), percent(50)); +const APP_TREASURER: Curve = Curve::make_reciprocal(4, 28, percent(80), percent(50), percent(100)); +const SUP_TREASURER: Curve = Curve::make_linear(28, 28, percent(0), percent(50)); +const APP_FELLOWSHIP_ADMIN: Curve = Curve::make_linear(17, 28, percent(50), percent(100)); +const SUP_FELLOWSHIP_ADMIN: Curve = + Curve::make_reciprocal(12, 28, percent(1), percent(0), percent(50)); +const APP_GENERAL_ADMIN: Curve = + Curve::make_reciprocal(4, 28, percent(80), percent(50), percent(100)); +const SUP_GENERAL_ADMIN: Curve = + Curve::make_reciprocal(7, 28, percent(10), percent(0), percent(50)); +const APP_AUCTION_ADMIN: Curve = + Curve::make_reciprocal(4, 28, percent(80), percent(50), percent(100)); +const SUP_AUCTION_ADMIN: Curve = + Curve::make_reciprocal(7, 28, percent(10), percent(0), percent(50)); +const APP_LEASE_ADMIN: Curve = Curve::make_linear(17, 28, percent(50), percent(100)); +const SUP_LEASE_ADMIN: Curve = Curve::make_reciprocal(12, 28, percent(1), percent(0), percent(50)); +const APP_REFERENDUM_CANCELLER: Curve = Curve::make_linear(17, 28, percent(50), percent(100)); +const SUP_REFERENDUM_CANCELLER: Curve = + Curve::make_reciprocal(12, 28, percent(1), percent(0), percent(50)); +const APP_REFERENDUM_KILLER: Curve = Curve::make_linear(17, 28, percent(50), percent(100)); +const SUP_REFERENDUM_KILLER: Curve = + Curve::make_reciprocal(12, 28, percent(1), percent(0), percent(50)); 
+const APP_SMALL_TIPPER: Curve = Curve::make_linear(10, 28, percent(50), percent(100)); +const SUP_SMALL_TIPPER: Curve = Curve::make_reciprocal(1, 28, percent(4), percent(0), percent(50)); +const APP_BIG_TIPPER: Curve = Curve::make_linear(10, 28, percent(50), percent(100)); +const SUP_BIG_TIPPER: Curve = Curve::make_reciprocal(8, 28, percent(1), percent(0), percent(50)); +const APP_SMALL_SPENDER: Curve = Curve::make_linear(17, 28, percent(50), percent(100)); +const SUP_SMALL_SPENDER: Curve = + Curve::make_reciprocal(12, 28, percent(1), percent(0), percent(50)); +const APP_MEDIUM_SPENDER: Curve = Curve::make_linear(23, 28, percent(50), percent(100)); +const SUP_MEDIUM_SPENDER: Curve = + Curve::make_reciprocal(16, 28, percent(1), percent(0), percent(50)); +const APP_BIG_SPENDER: Curve = Curve::make_linear(28, 28, percent(50), percent(100)); +const SUP_BIG_SPENDER: Curve = Curve::make_reciprocal(20, 28, percent(1), percent(0), percent(50)); +const APP_WHITELISTED_CALLER: Curve = + Curve::make_reciprocal(16, 28 * 24, percent(96), percent(50), percent(100)); +const SUP_WHITELISTED_CALLER: Curve = + Curve::make_reciprocal(1, 28, percent(20), percent(5), percent(50)); + +const TRACKS_DATA: [pallet_referenda::Track; 15] = [ + pallet_referenda::Track { + id: 0, + info: pallet_referenda::TrackInfo { + name: s("root"), + max_deciding: 1, + decision_deposit: 100 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 12 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: APP_ROOT, + min_support: SUP_ROOT, + }, + }, + pallet_referenda::Track { + id: 1, + info: pallet_referenda::TrackInfo { + name: s("whitelisted_caller"), + max_deciding: 100, + decision_deposit: 10 * GRAND, + prepare_period: 6 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 4 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_WHITELISTED_CALLER, + min_support: SUP_WHITELISTED_CALLER, + }, + }, + pallet_referenda::Track { + id: 10, + 
info: pallet_referenda::TrackInfo { + name: s("staking_admin"), + max_deciding: 10, + decision_deposit: 5 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 8 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_STAKING_ADMIN, + min_support: SUP_STAKING_ADMIN, + }, + }, + pallet_referenda::Track { + id: 11, + info: pallet_referenda::TrackInfo { + name: s("treasurer"), + max_deciding: 10, + decision_deposit: 1 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 8 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: APP_TREASURER, + min_support: SUP_TREASURER, + }, + }, + pallet_referenda::Track { + id: 12, + info: pallet_referenda::TrackInfo { + name: s("lease_admin"), + max_deciding: 10, + decision_deposit: 5 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 8 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_LEASE_ADMIN, + min_support: SUP_LEASE_ADMIN, + }, + }, + pallet_referenda::Track { + id: 13, + info: pallet_referenda::TrackInfo { + name: s("fellowship_admin"), + max_deciding: 10, + decision_deposit: 5 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 8 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_FELLOWSHIP_ADMIN, + min_support: SUP_FELLOWSHIP_ADMIN, + }, + }, + pallet_referenda::Track { + id: 14, + info: pallet_referenda::TrackInfo { + name: s("general_admin"), + max_deciding: 10, + decision_deposit: 5 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 8 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_GENERAL_ADMIN, + min_support: SUP_GENERAL_ADMIN, + }, + }, + pallet_referenda::Track { + id: 15, + info: pallet_referenda::TrackInfo { + name: s("auction_admin"), + max_deciding: 10, + decision_deposit: 5 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 20 * MINUTES, 
+ confirm_period: 8 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_AUCTION_ADMIN, + min_support: SUP_AUCTION_ADMIN, + }, + }, + pallet_referenda::Track { + id: 20, + info: pallet_referenda::TrackInfo { + name: s("referendum_canceller"), + max_deciding: 1_000, + decision_deposit: 10 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 14 * MINUTES, + confirm_period: 8 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_REFERENDUM_CANCELLER, + min_support: SUP_REFERENDUM_CANCELLER, + }, + }, + pallet_referenda::Track { + id: 21, + info: pallet_referenda::TrackInfo { + name: s("referendum_killer"), + max_deciding: 1_000, + decision_deposit: 50 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 8 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_REFERENDUM_KILLER, + min_support: SUP_REFERENDUM_KILLER, + }, + }, + pallet_referenda::Track { + id: 30, + info: pallet_referenda::TrackInfo { + name: s("small_tipper"), + max_deciding: 200, + decision_deposit: 1 * 3 * CENTS, + prepare_period: 1 * MINUTES, + decision_period: 14 * MINUTES, + confirm_period: 4 * MINUTES, + min_enactment_period: 1 * MINUTES, + min_approval: APP_SMALL_TIPPER, + min_support: SUP_SMALL_TIPPER, + }, + }, + pallet_referenda::Track { + id: 31, + info: pallet_referenda::TrackInfo { + name: s("big_tipper"), + max_deciding: 100, + decision_deposit: 10 * 3 * CENTS, + prepare_period: 4 * MINUTES, + decision_period: 14 * MINUTES, + confirm_period: 12 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_BIG_TIPPER, + min_support: SUP_BIG_TIPPER, + }, + }, + pallet_referenda::Track { + id: 32, + info: pallet_referenda::TrackInfo { + name: s("small_spender"), + max_deciding: 50, + decision_deposit: 100 * 3 * CENTS, + prepare_period: 10 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 10 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: APP_SMALL_SPENDER, + min_support: 
SUP_SMALL_SPENDER, + }, + }, + pallet_referenda::Track { + id: 33, + info: pallet_referenda::TrackInfo { + name: s("medium_spender"), + max_deciding: 50, + decision_deposit: 200 * 3 * CENTS, + prepare_period: 10 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 12 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: APP_MEDIUM_SPENDER, + min_support: SUP_MEDIUM_SPENDER, + }, + }, + pallet_referenda::Track { + id: 34, + info: pallet_referenda::TrackInfo { + name: s("big_spender"), + max_deciding: 50, + decision_deposit: 400 * 3 * CENTS, + prepare_period: 10 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 14 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: APP_BIG_SPENDER, + min_support: SUP_BIG_SPENDER, + }, + }, +]; + +pub struct TracksInfo; +impl pallet_referenda::TracksInfo for TracksInfo { + type Id = u16; + type RuntimeOrigin = ::PalletsOrigin; + + fn tracks( + ) -> impl Iterator>> + { + TRACKS_DATA.iter().map(Cow::Borrowed) + } + fn track_for(id: &Self::RuntimeOrigin) -> Result { + if let Ok(system_origin) = frame_system::RawOrigin::try_from(id.clone()) { + match system_origin { + frame_system::RawOrigin::Root => Ok(0), + _ => Err(()), + } + } else if let Ok(custom_origin) = origins::Origin::try_from(id.clone()) { + match custom_origin { + origins::Origin::WhitelistedCaller => Ok(1), + // General admin + origins::Origin::StakingAdmin => Ok(10), + origins::Origin::Treasurer => Ok(11), + origins::Origin::LeaseAdmin => Ok(12), + origins::Origin::FellowshipAdmin => Ok(13), + origins::Origin::GeneralAdmin => Ok(14), + origins::Origin::AuctionAdmin => Ok(15), + // Referendum admins + origins::Origin::ReferendumCanceller => Ok(20), + origins::Origin::ReferendumKiller => Ok(21), + // Limited treasury spenders + origins::Origin::SmallTipper => Ok(30), + origins::Origin::BigTipper => Ok(31), + origins::Origin::SmallSpender => Ok(32), + origins::Origin::MediumSpender => Ok(33), + origins::Origin::BigSpender => Ok(34), + 
_ => Err(()), + } + } else { + Err(()) + } + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/lib.rs b/substrate/frame/staking-async/runtimes/parachain/src/lib.rs new file mode 100644 index 0000000000000..d8612cd0021e1 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/lib.rs @@ -0,0 +1,2256 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Staking Async Parachain Runtime + +#![cfg_attr(not(feature = "std"), no_std)] +#![allow(non_local_definitions)] +#![recursion_limit = "512"] + +// Make the WASM binary available. +#[cfg(feature = "std")] +include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); + +mod genesis_config_presets; +mod weights; +pub mod xcm_config; + +// Configurations for next functionality. 
+mod bag_thresholds; +pub mod governance; +mod staking; + +extern crate alloc; + +use alloc::{vec, vec::Vec}; +use assets_common::{ + local_and_foreign_assets::{LocalFromLeft, TargetFromLeft}, + AssetIdForPoolAssets, AssetIdForPoolAssetsConvert, AssetIdForTrustBackedAssetsConvert, +}; +use codec::{Decode, DecodeWithMemTracking, Encode, MaxEncodedLen}; +use cumulus_pallet_parachain_system::{RelayNumberMonotonicallyIncreases, RelaychainDataProvider}; +use cumulus_primitives_core::{AggregateMessageOrigin, ClaimQueueOffset, CoreSelector, ParaId}; +use frame_support::{ + construct_runtime, derive_impl, + dispatch::DispatchClass, + genesis_builder_helper::{build_state, get_preset}, + ord_parameter_types, parameter_types, + traits::{ + fungible, + fungible::HoldConsideration, + fungibles, + tokens::{imbalance::ResolveAssetTo, nonfungibles_v2::Inspect}, + AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU32, ConstU64, ConstU8, + ConstantStoragePrice, Equals, InstanceFilter, TransformOrigin, WithdrawReasons, + }, + weights::{ConstantMultiplier, Weight, WeightToFee as _}, + BoundedVec, PalletId, +}; +use frame_system::{ + limits::{BlockLength, BlockWeights}, + EnsureRoot, EnsureSigned, EnsureSignedBy, +}; +use governance::{pallet_custom_origins, FellowshipAdmin, GeneralAdmin, StakingAdmin, Treasurer}; +use pallet_asset_conversion_tx_payment::SwapAssetAdapter; +use pallet_nfts::PalletFeatures; +use pallet_nomination_pools::PoolId; +use pallet_xcm::EnsureXcm; +use parachains_common::{ + impls::DealWithFees, message_queue::*, AccountId, AssetIdForTrustBackedAssets, AuraId, Balance, + BlockNumber, CollectionId, Hash, Header, ItemId, Nonce, Signature, AVERAGE_ON_INITIALIZE_RATIO, + NORMAL_DISPATCH_RATIO, +}; +use sp_api::impl_runtime_apis; +use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; +use sp_runtime::{ + generic, impl_opaque_keys, + traits::{AccountIdConversion, BlakeTwo256, Block as BlockT, ConvertInto, Verify}, + transaction_validity::{TransactionSource, 
TransactionValidity},
+	ApplyExtrinsicResult, Perbill, Permill, RuntimeDebug,
+};
+#[cfg(feature = "std")]
+use sp_version::NativeVersion;
+use sp_version::RuntimeVersion;
+use testnet_parachains_constants::westend::{
+	consensus::*, currency::*, fee::WeightToFee, snowbridge::EthereumNetwork, time::*,
+};
+use xcm_config::{
+	ForeignAssetsConvertedConcreteId, LocationToAccountId, PoolAssetsConvertedConcreteId,
+	PoolAssetsPalletLocation, TrustBackedAssetsConvertedConcreteId,
+	TrustBackedAssetsPalletLocation, WestendLocation, XcmOriginToTransactDispatchOrigin,
+};
+
+#[cfg(any(feature = "std", test))]
+pub use sp_runtime::BuildStorage;
+
+use assets_common::{
+	foreign_creators::ForeignCreators,
+	matching::{FromNetwork, FromSiblingParachain},
+};
+use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate};
+use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight};
+use xcm::{
+	latest::prelude::AssetId,
+	prelude::{VersionedAsset, VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm},
+};
+
+#[cfg(feature = "runtime-benchmarks")]
+use frame_support::traits::PalletInfoAccess;
+
+#[cfg(feature = "runtime-benchmarks")]
+use xcm::latest::prelude::{
+	Asset, Assets as XcmAssets, Fungible, Here, InteriorLocation, Junction, Junction::*, Location,
+	NetworkId, NonFungible, Parent, ParentThen, Response, XCM_VERSION,
+};
+
+use xcm_runtime_apis::{
+	dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects},
+	fees::Error as XcmPaymentApiError,
+};
+
+impl_opaque_keys! {
+	pub struct SessionKeys {
+		pub aura: Aura,
+	}
+}
+
+#[sp_version::runtime_version]
+pub const VERSION: RuntimeVersion = RuntimeVersion {
+	// Note: "westmint" was this chain's legacy name. The `spec_name` set below is
+	// "asset-hub-next" (it does NOT remain "westmint"); wallets/tools that still
+	// special-case the legacy "westmint" value should update to recognise
+	// "asset-hub-next" as well.
+ spec_name: alloc::borrow::Cow::Borrowed("asset-hub-next"), + impl_name: alloc::borrow::Cow::Borrowed("asset-hub-next"), + authoring_version: 1, + spec_version: 1_017_007, + impl_version: 0, + apis: RUNTIME_API_VERSIONS, + transaction_version: 16, + system_version: 1, +}; + +/// The version information used to identify this runtime when compiled natively. +#[cfg(feature = "std")] +pub fn native_version() -> NativeVersion { + NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } +} + +type RelayChainBlockNumberProvider = RelaychainDataProvider; + +parameter_types! { + pub const Version: RuntimeVersion = VERSION; + pub RuntimeBlockLength: BlockLength = + BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); + pub RuntimeBlockWeights: BlockWeights = BlockWeights::builder() + .base_block(BlockExecutionWeight::get()) + .for_class(DispatchClass::all(), |weights| { + weights.base_extrinsic = ExtrinsicBaseWeight::get(); + }) + .for_class(DispatchClass::Normal, |weights| { + weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); + }) + .for_class(DispatchClass::Operational, |weights| { + weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT); + // Operational transactions have some extra reserved space, so that they + // are included even if block reached `MAXIMUM_BLOCK_WEIGHT`. + weights.reserved = Some( + MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT + ); + }) + .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) + .build_or_panic(); + pub const SS58Prefix: u8 = 42; +} + +// Configure FRAME pallets to include in runtime. 
+#[derive_impl(frame_system::config_preludes::ParaChainDefaultConfig)] +impl frame_system::Config for Runtime { + type BlockWeights = RuntimeBlockWeights; + type BlockLength = RuntimeBlockLength; + type AccountId = AccountId; + type Nonce = Nonce; + type Hash = Hash; + type Block = Block; + type BlockHashCount = BlockHashCount; + type DbWeight = RocksDbWeight; + type Version = Version; + type AccountData = pallet_balances::AccountData; + type SystemWeightInfo = weights::frame_system::WeightInfo; + type ExtensionsWeightInfo = weights::frame_system_extensions::WeightInfo; + type SS58Prefix = SS58Prefix; + type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; + type MaxConsumers = frame_support::traits::ConstU32<16>; + type MultiBlockMigrator = MultiBlockMigrations; +} + +impl cumulus_pallet_weight_reclaim::Config for Runtime { + type WeightInfo = weights::cumulus_pallet_weight_reclaim::WeightInfo; +} + +impl pallet_timestamp::Config for Runtime { + /// A timestamp: milliseconds since the unix epoch. + type Moment = u64; + type OnTimestampSet = Aura; + type MinimumPeriod = ConstU64<0>; + type WeightInfo = weights::pallet_timestamp::WeightInfo; +} + +impl pallet_authorship::Config for Runtime { + type FindAuthor = pallet_session::FindAccountFromAuthorIndex; + type EventHandler = (CollatorSelection,); +} + +parameter_types! { + pub const ExistentialDeposit: Balance = EXISTENTIAL_DEPOSIT; +} + +impl pallet_balances::Config for Runtime { + type MaxLocks = ConstU32<50>; + /// The type for recording an account's balance. + type Balance = Balance; + /// The ubiquitous event type. 
+ type RuntimeEvent = RuntimeEvent; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = weights::pallet_balances::WeightInfo; + type MaxReserves = ConstU32<50>; + type ReserveIdentifier = [u8; 8]; + type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; + type FreezeIdentifier = RuntimeFreezeReason; + type MaxFreezes = frame_support::traits::VariantCountOf; + type DoneSlashHandler = (); +} + +parameter_types! { + /// Relay Chain `TransactionByteFee` / 10 + pub const TransactionByteFee: Balance = MILLICENTS; +} + +impl pallet_transaction_payment::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type OnChargeTransaction = + pallet_transaction_payment::FungibleAdapter>; + type WeightToFee = WeightToFee; + type LengthToFee = ConstantMultiplier; + type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; + type OperationalFeeMultiplier = ConstU8<5>; + type WeightInfo = weights::pallet_transaction_payment::WeightInfo; +} + +parameter_types! { + pub const AssetDeposit: Balance = UNITS / 10; // 1 / 10 WND deposit to create asset + pub const AssetAccountDeposit: Balance = deposit(1, 16); + pub const ApprovalDeposit: Balance = EXISTENTIAL_DEPOSIT; + pub const AssetsStringLimit: u32 = 50; + /// Key = 32 bytes, Value = 36 bytes (32+1+1+1+1) + // https://github.com/paritytech/substrate/blob/069917b/frame/assets/src/lib.rs#L257L271 + pub const MetadataDepositBase: Balance = deposit(1, 68); + pub const MetadataDepositPerByte: Balance = deposit(0, 1); +} + +pub type AssetsForceOrigin = EnsureRoot; + +// Called "Trust Backed" assets because these are generally registered by some account, and users of +// the asset assume it has some claimed backing. The pallet is called `Assets` in +// `construct_runtime` to avoid breaking changes on storage reads. 
+pub type TrustBackedAssetsInstance = pallet_assets::Instance1; +type TrustBackedAssetsCall = pallet_assets::Call; +impl pallet_assets::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Balance = Balance; + type AssetId = AssetIdForTrustBackedAssets; + type AssetIdParameter = codec::Compact; + type Currency = Balances; + type CreateOrigin = AsEnsureOriginWithArg>; + type ForceOrigin = AssetsForceOrigin; + type AssetDeposit = AssetDeposit; + type MetadataDepositBase = MetadataDepositBase; + type MetadataDepositPerByte = MetadataDepositPerByte; + type ApprovalDeposit = ApprovalDeposit; + type StringLimit = AssetsStringLimit; + type Holder = (); + type Freezer = AssetsFreezer; + type Extra = (); + type WeightInfo = weights::pallet_assets_local::WeightInfo; + type CallbackHandle = pallet_assets::AutoIncAssetId; + type AssetAccountDeposit = AssetAccountDeposit; + type RemoveItemsLimit = ConstU32<1000>; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = (); +} + +// Allow Freezes for the `Assets` pallet +pub type AssetsFreezerInstance = pallet_assets_freezer::Instance1; +impl pallet_assets_freezer::Config for Runtime { + type RuntimeFreezeReason = RuntimeFreezeReason; + type RuntimeEvent = RuntimeEvent; +} + +parameter_types! { + pub const AssetConversionPalletId: PalletId = PalletId(*b"py/ascon"); + pub const LiquidityWithdrawalFee: Permill = Permill::from_percent(0); +} + +ord_parameter_types! 
{ + pub const AssetConversionOrigin: sp_runtime::AccountId32 = + AccountIdConversion::::into_account_truncating(&AssetConversionPalletId::get()); +} + +pub type PoolAssetsInstance = pallet_assets::Instance3; +impl pallet_assets::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Balance = Balance; + type RemoveItemsLimit = ConstU32<1000>; + type AssetId = u32; + type AssetIdParameter = u32; + type Currency = Balances; + type CreateOrigin = + AsEnsureOriginWithArg>; + type ForceOrigin = AssetsForceOrigin; + type AssetDeposit = ConstU128<0>; + type AssetAccountDeposit = ConstU128<0>; + type MetadataDepositBase = ConstU128<0>; + type MetadataDepositPerByte = ConstU128<0>; + type ApprovalDeposit = ConstU128<0>; + type StringLimit = ConstU32<50>; + type Holder = (); + type Freezer = PoolAssetsFreezer; + type Extra = (); + type WeightInfo = weights::pallet_assets_pool::WeightInfo; + type CallbackHandle = (); + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = (); +} + +// Allow Freezes for the `PoolAssets` pallet +pub type PoolAssetsFreezerInstance = pallet_assets_freezer::Instance3; +impl pallet_assets_freezer::Config for Runtime { + type RuntimeFreezeReason = RuntimeFreezeReason; + type RuntimeEvent = RuntimeEvent; +} + +/// Union fungibles implementation for `Assets` and `ForeignAssets`. +pub type LocalAndForeignAssets = fungibles::UnionOf< + Assets, + ForeignAssets, + LocalFromLeft< + AssetIdForTrustBackedAssetsConvert, + AssetIdForTrustBackedAssets, + xcm::v5::Location, + >, + xcm::v5::Location, + AccountId, +>; + +/// Union fungibles implementation for `AssetsFreezer` and `ForeignAssetsFreezer`. +pub type LocalAndForeignAssetsFreezer = fungibles::UnionOf< + AssetsFreezer, + ForeignAssetsFreezer, + LocalFromLeft< + AssetIdForTrustBackedAssetsConvert, + AssetIdForTrustBackedAssets, + xcm::v5::Location, + >, + xcm::v5::Location, + AccountId, +>; + +/// Union fungibles implementation for [`LocalAndForeignAssets`] and `Balances`. 
+pub type NativeAndNonPoolAssets = fungible::UnionOf< + Balances, + LocalAndForeignAssets, + TargetFromLeft, + xcm::v5::Location, + AccountId, +>; + +/// Union fungibles implementation for [`LocalAndForeignAssetsFreezer`] and [`Balances`]. +pub type NativeAndNonPoolAssetsFreezer = fungible::UnionOf< + Balances, + LocalAndForeignAssetsFreezer, + TargetFromLeft, + xcm::v5::Location, + AccountId, +>; + +/// Union fungibles implementation for [`PoolAssets`] and [`NativeAndNonPoolAssets`]. +/// +/// NOTE: Should be kept updated to include ALL balances and assets in the runtime. +pub type NativeAndAllAssets = fungibles::UnionOf< + PoolAssets, + NativeAndNonPoolAssets, + LocalFromLeft< + AssetIdForPoolAssetsConvert, + AssetIdForPoolAssets, + xcm::v5::Location, + >, + xcm::v5::Location, + AccountId, +>; + +/// Union fungibles implementation for [`PoolAssetsFreezer`] and [`NativeAndNonPoolAssetsFreezer`]. +/// +/// NOTE: Should be kept updated to include ALL balances and assets in the runtime. +pub type NativeAndAllAssetsFreezer = fungibles::UnionOf< + PoolAssetsFreezer, + NativeAndNonPoolAssetsFreezer, + LocalFromLeft< + AssetIdForPoolAssetsConvert, + AssetIdForPoolAssets, + xcm::v5::Location, + >, + xcm::v5::Location, + AccountId, +>; + +pub type PoolIdToAccountId = pallet_asset_conversion::AccountIdConverter< + AssetConversionPalletId, + (xcm::v5::Location, xcm::v5::Location), +>; + +impl pallet_asset_conversion::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Balance = Balance; + type HigherPrecisionBalance = sp_core::U256; + type AssetKind = xcm::v5::Location; + type Assets = NativeAndNonPoolAssets; + type PoolId = (Self::AssetKind, Self::AssetKind); + type PoolLocator = pallet_asset_conversion::WithFirstAsset< + WestendLocation, + AccountId, + Self::AssetKind, + PoolIdToAccountId, + >; + type PoolAssetId = u32; + type PoolAssets = PoolAssets; + type PoolSetupFee = ConstU128<0>; // Asset class deposit fees are sufficient to prevent spam + type 
PoolSetupFeeAsset = WestendLocation; + type PoolSetupFeeTarget = ResolveAssetTo; + type LiquidityWithdrawalFee = LiquidityWithdrawalFee; + type LPFee = ConstU32<3>; + type PalletId = AssetConversionPalletId; + type MaxSwapPathLength = ConstU32<3>; + type MintMinLiquidity = ConstU128<100>; + type WeightInfo = weights::pallet_asset_conversion::WeightInfo; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = assets_common::benchmarks::AssetPairFactory< + WestendLocation, + parachain_info::Pallet, + xcm_config::TrustBackedAssetsPalletIndex, + xcm::v5::Location, + >; +} + +#[cfg(feature = "runtime-benchmarks")] +pub struct PalletAssetRewardsBenchmarkHelper; + +#[cfg(feature = "runtime-benchmarks")] +impl pallet_asset_rewards::benchmarking::BenchmarkHelper + for PalletAssetRewardsBenchmarkHelper +{ + fn staked_asset() -> Location { + Location::new( + 0, + [PalletInstance(::index() as u8), GeneralIndex(100)], + ) + } + fn reward_asset() -> Location { + Location::new( + 0, + [PalletInstance(::index() as u8), GeneralIndex(101)], + ) + } +} + +parameter_types! { + pub const MinVestedTransfer: Balance = 100 * CENTS; + pub UnvestedFundsAllowedWithdrawReasons: WithdrawReasons = + WithdrawReasons::except(WithdrawReasons::TRANSFER | WithdrawReasons::RESERVE); +} + +impl pallet_vesting::Config for Runtime { + const MAX_VESTING_SCHEDULES: u32 = 100; + type BlockNumberProvider = RelayChainBlockNumberProvider; + type BlockNumberToBalance = ConvertInto; + type Currency = Balances; + type MinVestedTransfer = MinVestedTransfer; + type RuntimeEvent = RuntimeEvent; + type WeightInfo = weights::pallet_vesting::WeightInfo; + type UnvestedFundsAllowedWithdrawReasons = UnvestedFundsAllowedWithdrawReasons; +} + +parameter_types! 
{ + pub const AssetRewardsPalletId: PalletId = PalletId(*b"py/astrd"); + pub const RewardsPoolCreationHoldReason: RuntimeHoldReason = + RuntimeHoldReason::AssetRewards(pallet_asset_rewards::HoldReason::PoolCreation); + // 1 item, 135 bytes into the storage on pool creation. + pub const StakePoolCreationDeposit: Balance = deposit(1, 135); +} + +impl pallet_asset_rewards::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type PalletId = AssetRewardsPalletId; + type Balance = Balance; + type Assets = NativeAndAllAssets; + type AssetsFreezer = NativeAndAllAssetsFreezer; + type AssetId = xcm::v5::Location; + type CreatePoolOrigin = EnsureSigned; + type RuntimeFreezeReason = RuntimeFreezeReason; + type Consideration = HoldConsideration< + AccountId, + Balances, + RewardsPoolCreationHoldReason, + ConstantStoragePrice, + >; + type WeightInfo = weights::pallet_asset_rewards::WeightInfo; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = PalletAssetRewardsBenchmarkHelper; +} + +impl pallet_asset_conversion_ops::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type PriorAccountIdConverter = pallet_asset_conversion::AccountIdConverterNoSeed< + ::PoolId, + >; + type AssetsRefund = ::Assets; + type PoolAssetsRefund = ::PoolAssets; + type PoolAssetsTeam = ::PoolAssets; + type DepositAsset = Balances; + type WeightInfo = weights::pallet_asset_conversion_ops::WeightInfo; +} + +parameter_types! 
{ + // we just reuse the same deposits + pub const ForeignAssetsAssetDeposit: Balance = AssetDeposit::get(); + pub const ForeignAssetsAssetAccountDeposit: Balance = AssetAccountDeposit::get(); + pub const ForeignAssetsApprovalDeposit: Balance = ApprovalDeposit::get(); + pub const ForeignAssetsAssetsStringLimit: u32 = AssetsStringLimit::get(); + pub const ForeignAssetsMetadataDepositBase: Balance = MetadataDepositBase::get(); + pub const ForeignAssetsMetadataDepositPerByte: Balance = MetadataDepositPerByte::get(); +} + +/// Assets managed by some foreign location. Note: we do not declare a `ForeignAssetsCall` type, as +/// this type is used in proxy definitions. We assume that a foreign location would not want to set +/// an individual, local account as a proxy for the issuance of their assets. This issuance should +/// be managed by the foreign location's governance. +pub type ForeignAssetsInstance = pallet_assets::Instance2; +impl pallet_assets::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Balance = Balance; + type AssetId = xcm::v5::Location; + type AssetIdParameter = xcm::v5::Location; + type Currency = Balances; + type CreateOrigin = ForeignCreators< + ( + FromSiblingParachain, xcm::v5::Location>, + FromNetwork, + xcm_config::bridging::to_rococo::RococoAssetFromAssetHubRococo, + ), + LocationToAccountId, + AccountId, + xcm::v5::Location, + >; + type ForceOrigin = AssetsForceOrigin; + type AssetDeposit = ForeignAssetsAssetDeposit; + type MetadataDepositBase = ForeignAssetsMetadataDepositBase; + type MetadataDepositPerByte = ForeignAssetsMetadataDepositPerByte; + type ApprovalDeposit = ForeignAssetsApprovalDeposit; + type StringLimit = ForeignAssetsAssetsStringLimit; + type Holder = (); + type Freezer = ForeignAssetsFreezer; + type Extra = (); + type WeightInfo = weights::pallet_assets_foreign::WeightInfo; + type CallbackHandle = (); + type AssetAccountDeposit = ForeignAssetsAssetAccountDeposit; + type RemoveItemsLimit = 
frame_support::traits::ConstU32<1000>; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = xcm_config::XcmBenchmarkHelper; +} + +// Allow Freezes for the `ForeignAssets` pallet +pub type ForeignAssetsFreezerInstance = pallet_assets_freezer::Instance2; +impl pallet_assets_freezer::Config for Runtime { + type RuntimeFreezeReason = RuntimeFreezeReason; + type RuntimeEvent = RuntimeEvent; +} + +parameter_types! { + // One storage item; key size is 32; value is size 4+4+16+32 bytes = 56 bytes. + pub const DepositBase: Balance = deposit(1, 88); + // Additional storage item size of 32 bytes. + pub const DepositFactor: Balance = deposit(0, 32); + pub const MaxSignatories: u32 = 100; +} + +impl pallet_multisig::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; + type Currency = Balances; + type DepositBase = DepositBase; + type DepositFactor = DepositFactor; + type MaxSignatories = MaxSignatories; + type WeightInfo = weights::pallet_multisig::WeightInfo; + // TODO add migration. + type BlockNumberProvider = RelayChainBlockNumberProvider; +} + +impl pallet_utility::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; + type PalletsOrigin = OriginCaller; + type WeightInfo = weights::pallet_utility::WeightInfo; +} + +parameter_types! { + // One storage item; key size 32, value size 8; . + pub const ProxyDepositBase: Balance = deposit(1, 40); + // Additional storage item size of 33 bytes. + pub const ProxyDepositFactor: Balance = deposit(0, 33); + pub const MaxProxies: u16 = 32; + // One storage item; key size 32, value size 16 + pub const AnnouncementDepositBase: Balance = deposit(1, 48); + pub const AnnouncementDepositFactor: Balance = deposit(0, 66); + pub const MaxPending: u16 = 32; +} + +/// The type used to represent the kinds of proxying allowed. 
+#[derive( + Copy, + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + Encode, + Decode, + DecodeWithMemTracking, + RuntimeDebug, + MaxEncodedLen, + scale_info::TypeInfo, +)] +pub enum ProxyType { + /// Fully permissioned proxy. Can execute any call on behalf of _proxied_. + Any, + /// Can execute any call that does not transfer funds or assets. + NonTransfer, + /// Proxy with the ability to reject time-delay proxy announcements. + CancelProxy, + /// Assets proxy. Can execute any call from `assets`, **including asset transfers**. + Assets, + /// Owner proxy. Can execute calls related to asset ownership. + AssetOwner, + /// Asset manager. Can execute calls related to asset management. + AssetManager, + /// Collator selection proxy. Can execute calls related to collator selection mechanism. + Collator, +} +impl Default for ProxyType { + fn default() -> Self { + Self::Any + } +} + +impl InstanceFilter for ProxyType { + fn filter(&self, c: &RuntimeCall) -> bool { + match self { + ProxyType::Any => true, + ProxyType::NonTransfer => !matches!( + c, + RuntimeCall::Balances { .. } | + RuntimeCall::Assets { .. } | + RuntimeCall::NftFractionalization { .. } | + RuntimeCall::Nfts { .. } | + RuntimeCall::Uniques { .. } + ), + ProxyType::CancelProxy => matches!( + c, + RuntimeCall::Proxy(pallet_proxy::Call::reject_announcement { .. }) | + RuntimeCall::Utility { .. } | + RuntimeCall::Multisig { .. } + ), + ProxyType::Assets => { + matches!( + c, + RuntimeCall::Assets { .. } | + RuntimeCall::Utility { .. } | + RuntimeCall::Multisig { .. } | + RuntimeCall::NftFractionalization { .. } | + RuntimeCall::Nfts { .. } | + RuntimeCall::Uniques { .. } + ) + }, + ProxyType::AssetOwner => matches!( + c, + RuntimeCall::Assets(TrustBackedAssetsCall::create { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::start_destroy { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::destroy_accounts { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::destroy_approvals { .. 
}) | + RuntimeCall::Assets(TrustBackedAssetsCall::finish_destroy { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::transfer_ownership { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::set_team { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::set_metadata { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::clear_metadata { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::set_min_balance { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::create { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::destroy { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::redeposit { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::transfer_ownership { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::set_team { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::set_collection_max_supply { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::lock_collection { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::create { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::destroy { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::transfer_ownership { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::set_team { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::set_metadata { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::set_attribute { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::set_collection_metadata { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::clear_metadata { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::clear_attribute { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::clear_collection_metadata { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::set_collection_max_supply { .. }) | + RuntimeCall::Utility { .. } | + RuntimeCall::Multisig { .. } + ), + ProxyType::AssetManager => matches!( + c, + RuntimeCall::Assets(TrustBackedAssetsCall::mint { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::burn { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::freeze { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::block { .. 
}) | + RuntimeCall::Assets(TrustBackedAssetsCall::thaw { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::freeze_asset { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::thaw_asset { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::touch_other { .. }) | + RuntimeCall::Assets(TrustBackedAssetsCall::refund_other { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::force_mint { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::update_mint_settings { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::mint_pre_signed { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::set_attributes_pre_signed { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::lock_item_transfer { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::unlock_item_transfer { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::lock_item_properties { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::set_metadata { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::clear_metadata { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::set_collection_metadata { .. }) | + RuntimeCall::Nfts(pallet_nfts::Call::clear_collection_metadata { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::mint { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::burn { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::freeze { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::thaw { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::freeze_collection { .. }) | + RuntimeCall::Uniques(pallet_uniques::Call::thaw_collection { .. }) | + RuntimeCall::Utility { .. } | + RuntimeCall::Multisig { .. } + ), + ProxyType::Collator => matches!( + c, + RuntimeCall::CollatorSelection { .. } | + RuntimeCall::Utility { .. } | + RuntimeCall::Multisig { .. 
} + ), + } + } + + fn is_superset(&self, o: &Self) -> bool { + match (self, o) { + (x, y) if x == y => true, + (ProxyType::Any, _) => true, + (_, ProxyType::Any) => false, + (ProxyType::Assets, ProxyType::AssetOwner) => true, + (ProxyType::Assets, ProxyType::AssetManager) => true, + (ProxyType::NonTransfer, ProxyType::Collator) => true, + _ => false, + } + } +} + +impl pallet_proxy::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; + type Currency = Balances; + type ProxyType = ProxyType; + type ProxyDepositBase = ProxyDepositBase; + type ProxyDepositFactor = ProxyDepositFactor; + type MaxProxies = MaxProxies; + type WeightInfo = weights::pallet_proxy::WeightInfo; + type MaxPending = MaxPending; + type CallHasher = BlakeTwo256; + type AnnouncementDepositBase = AnnouncementDepositBase; + type AnnouncementDepositFactor = AnnouncementDepositFactor; + // TODO add migration. + type BlockNumberProvider = RelayChainBlockNumberProvider; +} + +parameter_types! 
{
	pub const ReservedXcmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4);
	pub const ReservedDmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4);
}

impl cumulus_pallet_parachain_system::Config for Runtime {
	type WeightInfo = weights::cumulus_pallet_parachain_system::WeightInfo<Runtime>;
	type RuntimeEvent = RuntimeEvent;
	type OnSystemEvent = ();
	type SelfParaId = parachain_info::Pallet<Runtime>;
	// NOTE(review): generic args stripped in the paste; restored as the standard
	// `EnqueueWithOrigin<MessageQueue, RelayOrigin>` — confirm against upstream.
	type DmpQueue = frame_support::traits::EnqueueWithOrigin<MessageQueue, RelayOrigin>;
	type ReservedDmpWeight = ReservedDmpWeight;
	type OutboundXcmpMessageSource = XcmpQueue;
	type XcmpMessageHandler = XcmpQueue;
	type ReservedXcmpWeight = ReservedXcmpWeight;
	type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases;
	type ConsensusHook = ConsensusHook;
	type SelectCore = cumulus_pallet_parachain_system::DefaultCoreSelector<Runtime>;
}

/// Aura-based consensus hook bounding block production velocity relative to the relay chain.
type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook<
	Runtime,
	RELAY_CHAIN_SLOT_DURATION_MILLIS,
	BLOCK_PROCESSING_VELOCITY,
	UNINCLUDED_SEGMENT_CAPACITY,
>;

impl parachain_info::Config for Runtime {}

parameter_types!
{
	pub MessageQueueServiceWeight: Weight = Perbill::from_percent(35) * RuntimeBlockWeights::get().max_block;
}

impl pallet_message_queue::Config for Runtime {
	type RuntimeEvent = RuntimeEvent;
	type WeightInfo = weights::pallet_message_queue::WeightInfo<Runtime>;
	#[cfg(feature = "runtime-benchmarks")]
	type MessageProcessor = pallet_message_queue::mock_helpers::NoopMessageProcessor<
		cumulus_primitives_core::AggregateMessageOrigin,
	>;
	#[cfg(not(feature = "runtime-benchmarks"))]
	type MessageProcessor = xcm_builder::ProcessXcmMessage<
		AggregateMessageOrigin,
		xcm_executor::XcmExecutor<xcm_config::XcmConfig>,
		RuntimeCall,
	>;
	type Size = u32;
	// The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin:
	type QueueChangeHandler = NarrowOriginToSibling<XcmpQueue>;
	type QueuePausedQuery = NarrowOriginToSibling<XcmpQueue>;
	type HeapSize = sp_core::ConstU32<{ 103 * 1024 }>;
	type MaxStale = sp_core::ConstU32<8>;
	type ServiceWeight = MessageQueueServiceWeight;
	type IdleMaxServiceWeight = MessageQueueServiceWeight;
}

impl cumulus_pallet_aura_ext::Config for Runtime {}

parameter_types! {
	/// The asset ID for the asset that we use to pay for message delivery fees.
	pub FeeAssetId: AssetId = AssetId(xcm_config::WestendLocation::get());
	/// The base fee for the message delivery fees.
	pub const BaseDeliveryFee: u128 = CENTS.saturating_mul(3);
}

/// Price of sibling-parachain message delivery: exponential in channel congestion.
pub type PriceForSiblingParachainDelivery = polkadot_runtime_common::xcm_sender::ExponentialPrice<
	FeeAssetId,
	BaseDeliveryFee,
	TransactionByteFee,
	XcmpQueue,
>;

impl cumulus_pallet_xcmp_queue::Config for Runtime {
	type RuntimeEvent = RuntimeEvent;
	type ChannelInfo = ParachainSystem;
	type VersionWrapper = PolkadotXcm;
	// Enqueue XCMP messages from siblings for later processing.
+ type XcmpQueue = TransformOrigin; + type MaxInboundSuspended = ConstU32<1_000>; + type MaxActiveOutboundChannels = ConstU32<128>; + // Most on-chain HRMP channels are configured to use 102400 bytes of max message size, so we + // need to set the page size larger than that until we reduce the channel size on-chain. + type MaxPageSize = ConstU32<{ 103 * 1024 }>; + type ControllerOrigin = EnsureRoot; + type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; + type WeightInfo = weights::cumulus_pallet_xcmp_queue::WeightInfo; + type PriceForSiblingDelivery = PriceForSiblingParachainDelivery; +} + +impl cumulus_pallet_xcmp_queue::migration::v5::V5Config for Runtime { + // This must be the same as the `ChannelInfo` from the `Config`: + type ChannelList = ParachainSystem; +} + +parameter_types! { + pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; +} + +parameter_types! { + pub const Period: u32 = 6 * HOURS; + pub const Offset: u32 = 0; +} + +impl pallet_session::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type ValidatorId = ::AccountId; + // we don't have stash and controller, thus we don't need the convert as well. + type ValidatorIdOf = pallet_collator_selection::IdentityCollator; + type ShouldEndSession = pallet_session::PeriodicSessions; + type NextSessionRotation = pallet_session::PeriodicSessions; + type SessionManager = CollatorSelection; + // Essentially just Aura, but let's be pedantic. + type SessionHandler = ::KeyTypeIdProviders; + type Keys = SessionKeys; + type DisablingStrategy = (); + type WeightInfo = weights::pallet_session::WeightInfo; +} + +impl pallet_aura::Config for Runtime { + type AuthorityId = AuraId; + type DisabledValidators = (); + type MaxAuthorities = ConstU32<100_000>; + type AllowMultipleBlocksPerSlot = ConstBool; + type SlotDuration = ConstU64; +} + +parameter_types! 
{ + pub const PotId: PalletId = PalletId(*b"PotStake"); + pub const SessionLength: BlockNumber = 6 * HOURS; +} + +pub type CollatorSelectionUpdateOrigin = EnsureRoot; + +impl pallet_collator_selection::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type UpdateOrigin = CollatorSelectionUpdateOrigin; + type PotId = PotId; + type MaxCandidates = ConstU32<100>; + type MinEligibleCollators = ConstU32<4>; + type MaxInvulnerables = ConstU32<20>; + // should be a multiple of session or things will get inconsistent + type KickThreshold = Period; + type ValidatorId = ::AccountId; + type ValidatorIdOf = pallet_collator_selection::IdentityCollator; + type ValidatorRegistration = Session; + type WeightInfo = weights::pallet_collator_selection::WeightInfo; +} + +parameter_types! { + pub StakingPot: AccountId = CollatorSelection::account_id(); +} + +impl pallet_asset_conversion_tx_payment::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type AssetId = xcm::v5::Location; + type OnChargeAssetTransaction = SwapAssetAdapter< + WestendLocation, + NativeAndNonPoolAssets, + AssetConversion, + ResolveAssetTo, + >; + type WeightInfo = weights::pallet_asset_conversion_tx_payment::WeightInfo; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = AssetConversionTxHelper; +} + +parameter_types! 
{ + pub const UniquesCollectionDeposit: Balance = UNITS / 10; // 1 / 10 UNIT deposit to create a collection + pub const UniquesItemDeposit: Balance = UNITS / 1_000; // 1 / 1000 UNIT deposit to mint an item + pub const UniquesMetadataDepositBase: Balance = deposit(1, 129); + pub const UniquesAttributeDepositBase: Balance = deposit(1, 0); + pub const UniquesDepositPerByte: Balance = deposit(0, 1); +} + +impl pallet_uniques::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type CollectionId = CollectionId; + type ItemId = ItemId; + type Currency = Balances; + type ForceOrigin = AssetsForceOrigin; + type CollectionDeposit = UniquesCollectionDeposit; + type ItemDeposit = UniquesItemDeposit; + type MetadataDepositBase = UniquesMetadataDepositBase; + type AttributeDepositBase = UniquesAttributeDepositBase; + type DepositPerByte = UniquesDepositPerByte; + type StringLimit = ConstU32<128>; + type KeyLimit = ConstU32<32>; + type ValueLimit = ConstU32<64>; + type WeightInfo = weights::pallet_uniques::WeightInfo; + #[cfg(feature = "runtime-benchmarks")] + type Helper = (); + type CreateOrigin = AsEnsureOriginWithArg>; + type Locker = (); +} + +parameter_types! 
{
	pub const NftFractionalizationPalletId: PalletId = PalletId(*b"fraction");
	pub NewAssetSymbol: BoundedVec<u8, AssetsStringLimit> = (*b"FRAC").to_vec().try_into().unwrap();
	pub NewAssetName: BoundedVec<u8, AssetsStringLimit> = (*b"Frac").to_vec().try_into().unwrap();
}

impl pallet_nft_fractionalization::Config for Runtime {
	type RuntimeEvent = RuntimeEvent;
	type Deposit = AssetDeposit;
	type Currency = Balances;
	type NewAssetSymbol = NewAssetSymbol;
	type NewAssetName = NewAssetName;
	type StringLimit = AssetsStringLimit;
	// NOTE(review): `<Self as …>` projections stripped in the paste; restored from the
	// standard upstream form — confirm.
	type NftCollectionId = <Self as pallet_nfts::Config>::CollectionId;
	type NftId = <Self as pallet_nfts::Config>::ItemId;
	type AssetBalance = <Self as pallet_balances::Config>::Balance;
	type AssetId = <Self as pallet_assets::Config<TrustBackedAssetsInstance>>::AssetId;
	type Assets = Assets;
	type Nfts = Nfts;
	type PalletId = NftFractionalizationPalletId;
	type WeightInfo = weights::pallet_nft_fractionalization::WeightInfo<Runtime>;
	type RuntimeHoldReason = RuntimeHoldReason;
	#[cfg(feature = "runtime-benchmarks")]
	type BenchmarkHelper = ();
}

parameter_types! {
	pub NftsPalletFeatures: PalletFeatures = PalletFeatures::all_enabled();
	pub const NftsMaxDeadlineDuration: BlockNumber = 12 * 30 * DAYS;
	// re-use the Uniques deposits
	pub const NftsCollectionDeposit: Balance = UniquesCollectionDeposit::get();
	pub const NftsItemDeposit: Balance = UniquesItemDeposit::get();
	pub const NftsMetadataDepositBase: Balance = UniquesMetadataDepositBase::get();
	pub const NftsAttributeDepositBase: Balance = UniquesAttributeDepositBase::get();
	pub const NftsDepositPerByte: Balance = UniquesDepositPerByte::get();
}

impl pallet_nfts::Config for Runtime {
	type RuntimeEvent = RuntimeEvent;
	type CollectionId = CollectionId;
	type ItemId = ItemId;
	type Currency = Balances;
	type CreateOrigin = AsEnsureOriginWithArg<EnsureSigned<AccountId>>;
	type ForceOrigin = AssetsForceOrigin;
	type Locker = ();
	type CollectionDeposit = NftsCollectionDeposit;
	type ItemDeposit = NftsItemDeposit;
	type MetadataDepositBase = NftsMetadataDepositBase;
	type AttributeDepositBase = NftsAttributeDepositBase;
	type DepositPerByte = NftsDepositPerByte;
	type StringLimit = ConstU32<256>;
	type KeyLimit = ConstU32<64>;
	type ValueLimit = ConstU32<256>;
	type ApprovalsLimit = ConstU32<20>;
	type ItemAttributesApprovalsLimit = ConstU32<30>;
	type MaxTips = ConstU32<10>;
	type MaxDeadlineDuration = NftsMaxDeadlineDuration;
	type MaxAttributesPerCall = ConstU32<10>;
	type Features = NftsPalletFeatures;
	type OffchainSignature = Signature;
	type OffchainPublic = <Signature as Verify>::Signer;
	type WeightInfo = weights::pallet_nfts::WeightInfo<Runtime>;
	#[cfg(feature = "runtime-benchmarks")]
	type Helper = ();
	type BlockNumberProvider = System;
}

/// XCM router instance to BridgeHub with bridging capabilities for `Rococo` global
/// consensus with dynamic fees and back-pressure.
pub type ToRococoXcmRouterInstance = pallet_xcm_bridge_hub_router::Instance1;
impl pallet_xcm_bridge_hub_router::Config<ToRococoXcmRouterInstance> for Runtime {
	type RuntimeEvent = RuntimeEvent;
	type WeightInfo = weights::pallet_xcm_bridge_hub_router::WeightInfo<Runtime>;

	type UniversalLocation = xcm_config::UniversalLocation;
	type SiblingBridgeHubLocation = xcm_config::bridging::SiblingBridgeHub;
	type BridgedNetworkId = xcm_config::bridging::to_rococo::RococoNetwork;
	type Bridges = xcm_config::bridging::NetworkExportTable;
	type DestinationVersion = PolkadotXcm;

	// NOTE(review): inner origin stripped in the paste; restored as the standard
	// `EnsureXcm<Equals<Self::SiblingBridgeHubLocation>>` — confirm against upstream.
	type BridgeHubOrigin = frame_support::traits::EitherOfDiverse<
		EnsureRoot<AccountId>,
		EnsureXcm<Equals<Self::SiblingBridgeHubLocation>>,
	>;
	type ToBridgeHubSender = XcmpQueue;
	type LocalXcmChannelManager =
		cumulus_pallet_xcmp_queue::bridging::InAndOutXcmpChannelStatusProvider<Runtime>;

	type ByteFee = xcm_config::bridging::XcmBridgeHubRouterByteFee;
	type FeeAsset = xcm_config::bridging::XcmBridgeHubRouterFeeAssetId;
}

parameter_types!
{ + pub MbmServiceWeight: Weight = Perbill::from_percent(80) * RuntimeBlockWeights::get().max_block; +} + +impl pallet_migrations::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + #[cfg(not(feature = "runtime-benchmarks"))] + type Migrations = (); + // Benchmarks need mocked migrations to guarantee that they succeed. + #[cfg(feature = "runtime-benchmarks")] + type Migrations = pallet_migrations::mock_helpers::MockedMigrations; + type CursorMaxLen = ConstU32<65_536>; + type IdentifierMaxLen = ConstU32<256>; + type MigrationStatusHandler = (); + type FailedMigrationHandler = frame_support::migrations::FreezeChainOnFailedMigration; + type MaxServiceWeight = MbmServiceWeight; + type WeightInfo = weights::pallet_migrations::WeightInfo; +} + +impl pallet_sudo::Config for Runtime { + type RuntimeCall = RuntimeCall; + type RuntimeEvent = RuntimeEvent; + type WeightInfo = pallet_sudo::weights::SubstrateWeight; +} + +// impl pallet_root_offences::Config for Runtime { +// type RuntimeEvent = RuntimeEvent; +// } + +// Create the runtime by composing the FRAME pallets that were previously configured. +construct_runtime!( + pub enum Runtime + { + // System support stuff. + System: frame_system = 0, + ParachainSystem: cumulus_pallet_parachain_system = 1, + // RandomnessCollectiveFlip = 2 removed + Timestamp: pallet_timestamp = 3, + ParachainInfo: parachain_info = 4, + WeightReclaim: cumulus_pallet_weight_reclaim = 5, + MultiBlockMigrations: pallet_migrations = 6, + + // Monetary stuff. + Balances: pallet_balances = 10, + TransactionPayment: pallet_transaction_payment = 11, + // AssetTxPayment: pallet_asset_tx_payment = 12, + AssetTxPayment: pallet_asset_conversion_tx_payment = 13, + + // Collator support. the order of these 5 are important and shall not change. + Authorship: pallet_authorship = 20, + CollatorSelection: pallet_collator_selection = 21, + Session: pallet_session = 22, + Aura: pallet_aura = 23, + AuraExt: cumulus_pallet_aura_ext = 24, + + // XCM helpers. 
+ XcmpQueue: cumulus_pallet_xcmp_queue = 30, + PolkadotXcm: pallet_xcm = 31, + CumulusXcm: cumulus_pallet_xcm = 32, + // Bridge utilities. + ToRococoXcmRouter: pallet_xcm_bridge_hub_router:: = 34, + MessageQueue: pallet_message_queue = 35, + + // Handy utilities. + Utility: pallet_utility = 40, + Multisig: pallet_multisig = 41, + Proxy: pallet_proxy = 42, + + // The main stage. + Assets: pallet_assets:: = 50, + Uniques: pallet_uniques = 51, + Nfts: pallet_nfts = 52, + ForeignAssets: pallet_assets:: = 53, + NftFractionalization: pallet_nft_fractionalization = 54, + PoolAssets: pallet_assets:: = 55, + AssetConversion: pallet_asset_conversion = 56, + + AssetsFreezer: pallet_assets_freezer:: = 57, + ForeignAssetsFreezer: pallet_assets_freezer:: = 58, + PoolAssetsFreezer: pallet_assets_freezer:: = 59, + + AssetRewards: pallet_asset_rewards = 61, + + StateTrieMigration: pallet_state_trie_migration = 70, + + // Staking. + Staking: pallet_staking_async = 80, + NominationPools: pallet_nomination_pools = 81, + FastUnstake: pallet_fast_unstake = 82, + VoterList: pallet_bags_list:: = 83, + DelegatedStaking: pallet_delegated_staking = 84, + StakingNextRcClient: pallet_staking_async_rc_client = 89, + + // Election apparatus. + MultiBlock: pallet_election_provider_multi_block = 85, + MultiBlockVerifier: pallet_election_provider_multi_block::verifier = 86, + MultiBlockUnsigned: pallet_election_provider_multi_block::unsigned = 87, + MultiBlockSigned: pallet_election_provider_multi_block::signed = 88, + + // Governance. + Preimage: pallet_preimage = 90, + Scheduler: pallet_scheduler = 91, + ConvictionVoting: pallet_conviction_voting = 92, + Referenda: pallet_referenda = 93, + Origins: pallet_custom_origins = 94, + Whitelist: pallet_whitelist = 95, + Treasury: pallet_treasury = 96, + AssetRate: pallet_asset_rate = 97, + + // Balances. + Vesting: pallet_vesting = 100, + + // AHN specific. 
+ Sudo: pallet_sudo = 110, + // RootOffences: pallet_root_offences = 111, + + // TODO: the pallet instance should be removed once all pools have migrated + // to the new account IDs. + AssetConversionMigration: pallet_asset_conversion_ops = 200, + } +); + +/// The address format for describing accounts. +pub type Address = sp_runtime::MultiAddress; +/// Block type as expected by this runtime. +pub type Block = generic::Block; +/// A Block signed with a Justification +pub type SignedBlock = generic::SignedBlock; +/// BlockId type as expected by this runtime. +pub type BlockId = generic::BlockId; +/// The extension to the basic transaction logic. +pub type TxExtension = cumulus_pallet_weight_reclaim::StorageWeightReclaim< + Runtime, + ( + frame_system::CheckNonZeroSender, + frame_system::CheckSpecVersion, + frame_system::CheckTxVersion, + frame_system::CheckGenesis, + frame_system::CheckEra, + frame_system::CheckNonce, + frame_system::CheckWeight, + pallet_asset_conversion_tx_payment::ChargeAssetTxPayment, + frame_metadata_hash_extension::CheckMetadataHash, + ), +>; + +pub type UncheckedExtrinsic = + generic::UncheckedExtrinsic; + +/// Migrations to apply on runtime upgrade. +pub type Migrations = ( + // permanent + pallet_xcm::migration::MigrateToLatestXcmVersion, +); + +/// Executive: handles dispatch to the various modules. 
+pub type Executive = frame_executive::Executive< + Runtime, + Block, + frame_system::ChainContext, + Runtime, + AllPalletsWithSystem, + Migrations, +>; + +#[cfg(feature = "runtime-benchmarks")] +pub struct AssetConversionTxHelper; + +#[cfg(feature = "runtime-benchmarks")] +impl + pallet_asset_conversion_tx_payment::BenchmarkHelperTrait< + AccountId, + cumulus_primitives_core::Location, + cumulus_primitives_core::Location, + > for AssetConversionTxHelper +{ + fn create_asset_id_parameter( + seed: u32, + ) -> (cumulus_primitives_core::Location, cumulus_primitives_core::Location) { + // Use a different parachain' foreign assets pallet so that the asset is indeed foreign. + let asset_id = cumulus_primitives_core::Location::new( + 1, + [ + cumulus_primitives_core::Junction::Parachain(3000), + cumulus_primitives_core::Junction::PalletInstance(53), + cumulus_primitives_core::Junction::GeneralIndex(seed.into()), + ], + ); + (asset_id.clone(), asset_id) + } + + fn setup_balances_and_pool(asset_id: cumulus_primitives_core::Location, account: AccountId) { + use frame_support::{assert_ok, traits::fungibles::Mutate}; + assert_ok!(ForeignAssets::force_create( + RuntimeOrigin::root(), + asset_id.clone().into(), + account.clone().into(), /* owner */ + true, /* is_sufficient */ + 1, + )); + + let lp_provider = account.clone(); + use frame_support::traits::Currency; + let _ = Balances::deposit_creating(&lp_provider, u64::MAX.into()); + assert_ok!(ForeignAssets::mint_into( + asset_id.clone().into(), + &lp_provider, + u64::MAX.into() + )); + + let token_native = alloc::boxed::Box::new(cumulus_primitives_core::Location::new( + 1, + cumulus_primitives_core::Junctions::Here, + )); + let token_second = alloc::boxed::Box::new(asset_id); + + assert_ok!(AssetConversion::create_pool( + RuntimeOrigin::signed(lp_provider.clone()), + token_native.clone(), + token_second.clone() + )); + + assert_ok!(AssetConversion::add_liquidity( + RuntimeOrigin::signed(lp_provider.clone()), + token_native, + 
token_second, + (u32::MAX / 2).into(), // 1 desired + u32::MAX.into(), // 2 desired + 1, // 1 min + 1, // 2 min + lp_provider, + )); + } +} + +#[cfg(feature = "runtime-benchmarks")] +mod benches { + frame_benchmarking::define_benchmarks!( + [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] + [pallet_asset_conversion_ops, AssetConversionMigration] + [pallet_asset_rate, AssetRate] + [pallet_assets, Local] + [pallet_assets, Foreign] + [pallet_assets, Pool] + [pallet_asset_conversion, AssetConversion] + [pallet_asset_rewards, AssetRewards] + [pallet_asset_conversion_tx_payment, AssetTxPayment] + [pallet_staking_async, Staking] + [pallet_bags_list, VoterList] + [pallet_balances, Balances] + [pallet_conviction_voting, ConvictionVoting] + [pallet_election_provider_multi_block, MultiBlock] + [pallet_election_provider_multi_block_verifier, MultiBlockVerifier] + [pallet_election_provider_multi_block_unsigned, MultiBlockUnsigned] + [pallet_election_provider_multi_block_signed, MultiBlockSigned] + [pallet_fast_unstake, FastUnstake] + [pallet_message_queue, MessageQueue] + [pallet_migrations, MultiBlockMigrations] + [pallet_multisig, Multisig] + [pallet_nft_fractionalization, NftFractionalization] + [pallet_nfts, Nfts] + [pallet_proxy, Proxy] + [pallet_session, SessionBench::] + [pallet_sudo, Sudo] + [pallet_uniques, Uniques] + [pallet_utility, Utility] + [pallet_timestamp, Timestamp] + [pallet_transaction_payment, TransactionPayment] + [pallet_collator_selection, CollatorSelection] + [cumulus_pallet_parachain_system, ParachainSystem] + [cumulus_pallet_xcmp_queue, XcmpQueue] + [pallet_treasury, Treasury] + [pallet_vesting, Vesting] + [pallet_whitelist, Whitelist] + [pallet_xcm_bridge_hub_router, ToRococo] + [pallet_asset_conversion_ops, AssetConversionMigration] + // XCM + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] + // NOTE: Make sure you point to the individual modules below. 
+ [pallet_xcm_benchmarks::fungible, XcmBalances] + [pallet_xcm_benchmarks::generic, XcmGeneric] + [cumulus_pallet_weight_reclaim, WeightReclaim] + ); +} + +impl_runtime_apis! { + impl sp_consensus_aura::AuraApi for Runtime { + fn slot_duration() -> sp_consensus_aura::SlotDuration { + sp_consensus_aura::SlotDuration::from_millis(SLOT_DURATION) + } + + fn authorities() -> Vec { + pallet_aura::Authorities::::get().into_inner() + } + } + + impl cumulus_primitives_aura::AuraUnincludedSegmentApi for Runtime { + fn can_build_upon( + included_hash: ::Hash, + slot: cumulus_primitives_aura::Slot, + ) -> bool { + ConsensusHook::can_build_upon(included_hash, slot) + } + } + + impl sp_api::Core for Runtime { + fn version() -> RuntimeVersion { + VERSION + } + + fn execute_block(block: Block) { + Executive::execute_block(block) + } + + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { + Executive::initialize_block(header) + } + } + + impl sp_api::Metadata for Runtime { + fn metadata() -> OpaqueMetadata { + OpaqueMetadata::new(Runtime::metadata().into()) + } + + fn metadata_at_version(version: u32) -> Option { + Runtime::metadata_at_version(version) + } + + fn metadata_versions() -> alloc::vec::Vec { + Runtime::metadata_versions() + } + } + + impl sp_block_builder::BlockBuilder for Runtime { + fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { + Executive::apply_extrinsic(extrinsic) + } + + fn finalize_block() -> ::Header { + Executive::finalize_block() + } + + fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<::Extrinsic> { + data.create_extrinsics() + } + + fn check_inherents( + block: Block, + data: sp_inherents::InherentData, + ) -> sp_inherents::CheckInherentsResult { + data.check_extrinsics(&block) + } + } + + impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { + fn validate_transaction( + source: TransactionSource, + tx: ::Extrinsic, + block_hash: ::Hash, + ) -> TransactionValidity { + 
Executive::validate_transaction(source, tx, block_hash) + } + } + + impl sp_offchain::OffchainWorkerApi for Runtime { + fn offchain_worker(header: &::Header) { + Executive::offchain_worker(header) + } + } + + impl sp_session::SessionKeys for Runtime { + fn generate_session_keys(seed: Option>) -> Vec { + SessionKeys::generate(seed) + } + + fn decode_session_keys( + encoded: Vec, + ) -> Option, KeyTypeId)>> { + SessionKeys::decode_into_raw_public_keys(&encoded) + } + } + + impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { + fn account_nonce(account: AccountId) -> Nonce { + System::account_nonce(account) + } + } + + impl pallet_nfts_runtime_api::NftsApi for Runtime { + fn owner(collection: u32, item: u32) -> Option { + >::owner(&collection, &item) + } + + fn collection_owner(collection: u32) -> Option { + >::collection_owner(&collection) + } + + fn attribute( + collection: u32, + item: u32, + key: Vec, + ) -> Option> { + >::attribute(&collection, &item, &key) + } + + fn custom_attribute( + account: AccountId, + collection: u32, + item: u32, + key: Vec, + ) -> Option> { + >::custom_attribute( + &account, + &collection, + &item, + &key, + ) + } + + fn system_attribute( + collection: u32, + item: Option, + key: Vec, + ) -> Option> { + >::system_attribute(&collection, item.as_ref(), &key) + } + + fn collection_attribute(collection: u32, key: Vec) -> Option> { + >::collection_attribute(&collection, &key) + } + } + + impl pallet_asset_conversion::AssetConversionApi< + Block, + Balance, + xcm::v5::Location, + > for Runtime + { + fn quote_price_exact_tokens_for_tokens(asset1: xcm::v5::Location, asset2: xcm::v5::Location, amount: Balance, include_fee: bool) -> Option { + AssetConversion::quote_price_exact_tokens_for_tokens(asset1, asset2, amount, include_fee) + } + + fn quote_price_tokens_for_exact_tokens(asset1: xcm::v5::Location, asset2: xcm::v5::Location, amount: Balance, include_fee: bool) -> Option { + 
AssetConversion::quote_price_tokens_for_exact_tokens(asset1, asset2, amount, include_fee) + } + + fn get_reserves(asset1: xcm::v5::Location, asset2: xcm::v5::Location) -> Option<(Balance, Balance)> { + AssetConversion::get_reserves(asset1, asset2).ok() + } + } + + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi for Runtime { + fn query_info( + uxt: ::Extrinsic, + len: u32, + ) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo { + TransactionPayment::query_info(uxt, len) + } + fn query_fee_details( + uxt: ::Extrinsic, + len: u32, + ) -> pallet_transaction_payment::FeeDetails { + TransactionPayment::query_fee_details(uxt, len) + } + fn query_weight_to_fee(weight: Weight) -> Balance { + TransactionPayment::weight_to_fee(weight) + } + fn query_length_to_fee(length: u32) -> Balance { + TransactionPayment::length_to_fee(length) + } + } + + impl xcm_runtime_apis::fees::XcmPaymentApi for Runtime { + fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { + let native_token = xcm_config::WestendLocation::get(); + // We accept the native token to pay fees. + let mut acceptable_assets = vec![AssetId(native_token.clone())]; + // We also accept all assets in a pool with the native token. + acceptable_assets.extend( + assets_common::PoolAdapter::::get_assets_in_pool_with(native_token) + .map_err(|()| XcmPaymentApiError::VersionedConversionFailed)? + ); + PolkadotXcm::query_acceptable_payment_assets(xcm_version, acceptable_assets) + } + + fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { + let native_asset = xcm_config::WestendLocation::get(); + let fee_in_native = WeightToFee::weight_to_fee(&weight); + let latest_asset_id: Result = asset.clone().try_into(); + match latest_asset_id { + Ok(asset_id) if asset_id.0 == native_asset => { + // for native asset + Ok(fee_in_native) + }, + Ok(asset_id) => { + // Try to get current price of `asset_id` in `native_asset`. 
+ if let Ok(Some(swapped_in_native)) = assets_common::PoolAdapter::::quote_price_tokens_for_exact_tokens( + asset_id.0.clone(), + native_asset, + fee_in_native, + true, // We include the fee. + ) { + Ok(swapped_in_native) + } else { + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + Err(XcmPaymentApiError::AssetNotFound) + } + }, + Err(_) => { + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); + Err(XcmPaymentApiError::VersionedConversionFailed) + } + } + } + + fn query_xcm_weight(message: VersionedXcm<()>) -> Result { + PolkadotXcm::query_xcm_weight(message) + } + + fn query_delivery_fees(destination: VersionedLocation, message: VersionedXcm<()>) -> Result { + PolkadotXcm::query_delivery_fees(destination, message) + } + } + + impl xcm_runtime_apis::dry_run::DryRunApi for Runtime { + fn dry_run_call(origin: OriginCaller, call: RuntimeCall, result_xcms_version: xcm::prelude::XcmVersion) -> Result, XcmDryRunApiError> { + PolkadotXcm::dry_run_call::(origin, call, result_xcms_version) + } + + fn dry_run_xcm(origin_location: VersionedLocation, xcm: VersionedXcm) -> Result, XcmDryRunApiError> { + PolkadotXcm::dry_run_xcm::(origin_location, xcm) + } + } + + impl xcm_runtime_apis::conversions::LocationToAccountApi for Runtime { + fn convert_location(location: VersionedLocation) -> Result< + AccountId, + xcm_runtime_apis::conversions::Error + > { + xcm_runtime_apis::conversions::LocationToAccountHelper::< + AccountId, + xcm_config::LocationToAccountId, + >::convert_location(location) + } + } + + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentCallApi + for Runtime + { + fn query_call_info( + call: RuntimeCall, + len: u32, + ) -> pallet_transaction_payment::RuntimeDispatchInfo { + TransactionPayment::query_call_info(call, len) + } + fn query_call_fee_details( + call: RuntimeCall, + len: u32, + ) -> 
pallet_transaction_payment::FeeDetails { + TransactionPayment::query_call_fee_details(call, len) + } + fn query_weight_to_fee(weight: Weight) -> Balance { + TransactionPayment::weight_to_fee(weight) + } + fn query_length_to_fee(length: u32) -> Balance { + TransactionPayment::length_to_fee(length) + } + } + + impl assets_common::runtime_api::FungiblesApi< + Block, + AccountId, + > for Runtime + { + fn query_account_balances(account: AccountId) -> Result { + use assets_common::fungible_conversion::{convert, convert_balance}; + Ok([ + // collect pallet_balance + { + let balance = Balances::free_balance(account.clone()); + if balance > 0 { + vec![convert_balance::(balance)?] + } else { + vec![] + } + }, + // collect pallet_assets (TrustBackedAssets) + convert::<_, _, _, _, TrustBackedAssetsConvertedConcreteId>( + Assets::account_balances(account.clone()) + .iter() + .filter(|(_, balance)| balance > &0) + )?, + // collect pallet_assets (ForeignAssets) + convert::<_, _, _, _, ForeignAssetsConvertedConcreteId>( + ForeignAssets::account_balances(account.clone()) + .iter() + .filter(|(_, balance)| balance > &0) + )?, + // collect pallet_assets (PoolAssets) + convert::<_, _, _, _, PoolAssetsConvertedConcreteId>( + PoolAssets::account_balances(account) + .iter() + .filter(|(_, balance)| balance > &0) + )?, + // collect ... e.g. 
other tokens + ].concat().into()) + } + } + + impl cumulus_primitives_core::CollectCollationInfo for Runtime { + fn collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { + ParachainSystem::collect_collation_info(header) + } + } + + impl pallet_asset_rewards::AssetRewards for Runtime { + fn pool_creation_cost() -> Balance { + StakePoolCreationDeposit::get() + } + } + + impl cumulus_primitives_core::GetCoreSelectorApi for Runtime { + fn core_selector() -> (CoreSelector, ClaimQueueOffset) { + ParachainSystem::core_selector() + } + } + + #[cfg(feature = "try-runtime")] + impl frame_try_runtime::TryRuntime for Runtime { + fn on_runtime_upgrade(checks: frame_try_runtime::UpgradeCheckSelect) -> (Weight, Weight) { + let weight = Executive::try_runtime_upgrade(checks).unwrap(); + (weight, RuntimeBlockWeights::get().max_block) + } + + fn execute_block( + block: Block, + state_root_check: bool, + signature_check: bool, + select: frame_try_runtime::TryStateSelect, + ) -> Weight { + // NOTE: intentional unwrap: we don't want to propagate the error backwards, and want to + // have a backtrace here. 
+ Executive::try_execute_block(block, state_root_check, signature_check, select).unwrap() + } + } + + + impl pallet_nomination_pools_runtime_api::NominationPoolsApi< + Block, + AccountId, + Balance, + > for Runtime { + fn pending_rewards(member: AccountId) -> Balance { + NominationPools::api_pending_rewards(member).unwrap_or_default() + } + + fn points_to_balance(pool_id: PoolId, points: Balance) -> Balance { + NominationPools::api_points_to_balance(pool_id, points) + } + + fn balance_to_points(pool_id: PoolId, new_funds: Balance) -> Balance { + NominationPools::api_balance_to_points(pool_id, new_funds) + } + + fn pool_pending_slash(pool_id: PoolId) -> Balance { + NominationPools::api_pool_pending_slash(pool_id) + } + + fn member_pending_slash(member: AccountId) -> Balance { + NominationPools::api_member_pending_slash(member) + } + + fn pool_needs_delegate_migration(pool_id: PoolId) -> bool { + NominationPools::api_pool_needs_delegate_migration(pool_id) + } + + fn member_needs_delegate_migration(member: AccountId) -> bool { + NominationPools::api_member_needs_delegate_migration(member) + } + + fn member_total_balance(member: AccountId) -> Balance { + NominationPools::api_member_total_balance(member) + } + + fn pool_balance(pool_id: PoolId) -> Balance { + NominationPools::api_pool_balance(pool_id) + } + + fn pool_accounts(pool_id: PoolId) -> (AccountId, AccountId) { + NominationPools::api_pool_accounts(pool_id) + } + } + + impl pallet_staking_async_runtime_api::StakingApi for Runtime { + fn nominations_quota(balance: Balance) -> u32 { + Staking::api_nominations_quota(balance) + } + + fn eras_stakers_page_count(era: sp_staking::EraIndex, account: AccountId) -> sp_staking::Page { + Staking::api_eras_stakers_page_count(era, account) + } + + fn pending_rewards(era: sp_staking::EraIndex, account: AccountId) -> bool { + Staking::api_pending_rewards(era, account) + } + } + + #[cfg(feature = "runtime-benchmarks")] + impl frame_benchmarking::Benchmark for Runtime { + fn 
benchmark_metadata(extra: bool) -> ( + Vec, + Vec, + ) { + use frame_benchmarking::BenchmarkList; + use frame_support::traits::StorageInfoTrait; + use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; + use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; + use pallet_xcm_bridge_hub_router::benchmarking::Pallet as XcmBridgeHubRouterBench; + + // This is defined once again in dispatch_benchmark, because list_benchmarks! + // and add_benchmarks! are macros exported by define_benchmarks! macros and those types + // are referenced in that call. + type XcmBalances = pallet_xcm_benchmarks::fungible::Pallet::; + type XcmGeneric = pallet_xcm_benchmarks::generic::Pallet::; + + // Benchmark files generated for `Assets/ForeignAssets` instances are by default + // `pallet_assets_assets.rs / pallet_assets_foreign_assets`, which is not really nice, + // so with this redefinition we can change names to nicer: + // `pallet_assets_local.rs / pallet_assets_foreign.rs`. + type Local = pallet_assets::Pallet::; + type Foreign = pallet_assets::Pallet::; + type Pool = pallet_assets::Pallet::; + + type ToRococo = XcmBridgeHubRouterBench; + + let mut list = Vec::::new(); + list_benchmarks!(list, extra); + + let storage_info = AllPalletsWithSystem::storage_info(); + (list, storage_info) + } + + fn dispatch_benchmark( + config: frame_benchmarking::BenchmarkConfig + ) -> Result, alloc::string::String> { + use frame_benchmarking::{BenchmarkBatch, BenchmarkError}; + use sp_storage::TrackedStorageKey; + use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; + + // add a few custom keys to benchmarks. 
+ frame_benchmarking::benchmarking::add_to_whitelist( + crate::staking::MaxElectingVoters::key().to_vec().into() + ); + frame_benchmarking::benchmarking::add_to_whitelist( + crate::staking::Pages::key().to_vec().into() + ); + frame_benchmarking::benchmarking::add_to_whitelist( + crate::staking::SolutionImprovementThreshold::key().to_vec().into() + ); + frame_benchmarking::benchmarking::add_to_whitelist( + crate::staking::SignedPhase::key().to_vec().into() + ); + frame_benchmarking::benchmarking::add_to_whitelist( + crate::staking::UnsignedPhase::key().to_vec().into() + ); + frame_benchmarking::benchmarking::add_to_whitelist( + crate::staking::SignedValidationPhase::key().to_vec().into() + ); + + impl frame_system_benchmarking::Config for Runtime { + fn setup_set_code_requirements(code: &alloc::vec::Vec) -> Result<(), BenchmarkError> { + ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); + Ok(()) + } + + fn verify_set_code() { + System::assert_last_event(cumulus_pallet_parachain_system::Event::::ValidationFunctionStored.into()); + } + } + + use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + impl cumulus_pallet_session_benchmarking::Config for Runtime {} + + parameter_types! 
{ + pub ExistentialDepositAsset: Option = Some(( + WestendLocation::get(), + ExistentialDeposit::get() + ).into()); + pub const RandomParaId: ParaId = ParaId::new(43211234); + } + + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + type DeliveryHelper = ( + cumulus_primitives_utility::ToParentDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + xcm_config::PriceForParentDelivery, + >, + polkadot_runtime_common::xcm_sender::ToParachainDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + PriceForSiblingParachainDelivery, + RandomParaId, + ParachainSystem, + > + ); + + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_asset_and_dest() -> Option<(Asset, Location)> { + // Relay/native token can be teleported between AH and Relay. + Some(( + Asset { + fun: Fungible(ExistentialDeposit::get()), + id: AssetId(Parent.into()) + }, + Parent.into(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(Asset, Location)> { + Some(( + Asset { + fun: Fungible(ExistentialDeposit::get()), + id: AssetId(Parent.into()) + }, + // AH can reserve transfer native token to some random parachain. + ParentThen(Parachain(RandomParaId::get().into()).into()).into(), + )) + } + + fn set_up_complex_asset_transfer( + ) -> Option<(XcmAssets, u32, Location, alloc::boxed::Box)> { + // Transfer to Relay some local AH asset (local-reserve-transfer) while paying + // fees using teleported native token. 
+ // (We don't care that Relay doesn't accept incoming unknown AH local asset) + let dest = Parent.into(); + + let fee_amount = EXISTENTIAL_DEPOSIT; + let fee_asset: Asset = (Location::parent(), fee_amount).into(); + + let who = frame_benchmarking::whitelisted_caller(); + // Give some multiple of the existential deposit + let balance = fee_amount + EXISTENTIAL_DEPOSIT * 1000; + let _ = >::make_free_balance_be( + &who, balance, + ); + // verify initial balance + assert_eq!(Balances::free_balance(&who), balance); + + // set up local asset + let asset_amount = 10u128; + let initial_asset_amount = asset_amount * 10; + let (asset_id, _, _) = pallet_assets::benchmarking::create_default_minted_asset::< + Runtime, + pallet_assets::Instance1 + >(true, initial_asset_amount); + let asset_location = Location::new( + 0, + [PalletInstance(50), GeneralIndex(u32::from(asset_id).into())] + ); + let transfer_asset: Asset = (asset_location, asset_amount).into(); + + let assets: XcmAssets = vec![fee_asset.clone(), transfer_asset].into(); + let fee_index = if assets.get(0).unwrap().eq(&fee_asset) { 0 } else { 1 }; + + // verify transferred successfully + let verify = alloc::boxed::Box::new(move || { + // verify native balance after transfer, decreased by transferred fee amount + // (plus transport fees) + assert!(Balances::free_balance(&who) <= balance - fee_amount); + // verify asset balance decreased by exactly transferred amount + assert_eq!( + Assets::balance(asset_id.into(), &who), + initial_asset_amount - asset_amount, + ); + }); + Some((assets, fee_index as u32, dest, verify)) + } + + fn get_asset() -> Asset { + Asset { + id: AssetId(Location::parent()), + fun: Fungible(ExistentialDeposit::get()), + } + } + } + + use pallet_xcm_bridge_hub_router::benchmarking::{ + Pallet as XcmBridgeHubRouterBench, + Config as XcmBridgeHubRouterConfig, + }; + + impl XcmBridgeHubRouterConfig for Runtime { + fn make_congested() { + 
cumulus_pallet_xcmp_queue::bridging::suspend_channel_for_benchmarks::( + xcm_config::bridging::SiblingBridgeHubParaId::get().into() + ); + } + fn ensure_bridged_target_destination() -> Result { + ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests( + xcm_config::bridging::SiblingBridgeHubParaId::get().into() + ); + let bridged_asset_hub = xcm_config::bridging::to_rococo::AssetHubRococo::get(); + let _ = PolkadotXcm::force_xcm_version( + RuntimeOrigin::root(), + alloc::boxed::Box::new(bridged_asset_hub.clone()), + XCM_VERSION, + ).map_err(|e| { + log::error!( + "Failed to dispatch `force_xcm_version({:?}, {:?}, {:?})`, error: {:?}", + RuntimeOrigin::root(), + bridged_asset_hub, + XCM_VERSION, + e + ); + BenchmarkError::Stop("XcmVersion was not stored!") + })?; + Ok(bridged_asset_hub) + } + } + + use xcm_config::{MaxAssetsIntoHolding, WestendLocation}; + use pallet_xcm_benchmarks::asset_instance_from; + + impl pallet_xcm_benchmarks::Config for Runtime { + type XcmConfig = xcm_config::XcmConfig; + type AccountIdConverter = xcm_config::LocationToAccountId; + type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + xcm_config::PriceForParentDelivery, + >; + fn valid_destination() -> Result { + Ok(WestendLocation::get()) + } + fn worst_case_holding(depositable_count: u32) -> XcmAssets { + // A mix of fungible, non-fungible, and concrete assets. 
+ let holding_non_fungibles = MaxAssetsIntoHolding::get() / 2 - depositable_count; + let holding_fungibles = holding_non_fungibles - 2; // -2 for two `iter::once` bellow + let fungibles_amount: u128 = 100; + (0..holding_fungibles) + .map(|i| { + Asset { + id: AssetId(GeneralIndex(i as u128).into()), + fun: Fungible(fungibles_amount * (i + 1) as u128), // non-zero amount + } + }) + .chain(core::iter::once(Asset { id: AssetId(Here.into()), fun: Fungible(u128::MAX) })) + .chain(core::iter::once(Asset { id: AssetId(WestendLocation::get()), fun: Fungible(1_000_000 * UNITS) })) + .chain((0..holding_non_fungibles).map(|i| Asset { + id: AssetId(GeneralIndex(i as u128).into()), + fun: NonFungible(asset_instance_from(i)), + })) + .collect::>() + .into() + } + } + + parameter_types! { + pub const TrustedTeleporter: Option<(Location, Asset)> = Some(( + WestendLocation::get(), + Asset { fun: Fungible(UNITS), id: AssetId(WestendLocation::get()) }, + )); + pub const CheckedAccount: Option<(AccountId, xcm_builder::MintLocation)> = None; + // AssetHubNextWestend trusts AssetHubRococo as reserve for ROCs + pub TrustedReserve: Option<(Location, Asset)> = Some( + ( + xcm_config::bridging::to_rococo::AssetHubRococo::get(), + Asset::from((xcm_config::bridging::to_rococo::RocLocation::get(), 1000000000000 as u128)) + ) + ); + } + + impl pallet_xcm_benchmarks::fungible::Config for Runtime { + type TransactAsset = Balances; + + type CheckedAccount = CheckedAccount; + type TrustedTeleporter = TrustedTeleporter; + type TrustedReserve = TrustedReserve; + + fn get_asset() -> Asset { + Asset { + id: AssetId(WestendLocation::get()), + fun: Fungible(UNITS), + } + } + } + + impl pallet_xcm_benchmarks::generic::Config for Runtime { + type TransactAsset = Balances; + type RuntimeCall = RuntimeCall; + + fn worst_case_response() -> (u64, Response) { + (0u64, Response::Version(Default::default())) + } + + fn worst_case_asset_exchange() -> Result<(XcmAssets, XcmAssets), BenchmarkError> { + 
Err(BenchmarkError::Skip) + } + + fn universal_alias() -> Result<(Location, Junction), BenchmarkError> { + xcm_config::bridging::BridgingBenchmarksHelper::prepare_universal_alias() + .ok_or(BenchmarkError::Skip) + } + + fn transact_origin_and_runtime_call() -> Result<(Location, RuntimeCall), BenchmarkError> { + Ok((WestendLocation::get(), frame_system::Call::remark_with_event { remark: vec![] }.into())) + } + + fn subscribe_origin() -> Result { + Ok(WestendLocation::get()) + } + + fn claimable_asset() -> Result<(Location, Location, XcmAssets), BenchmarkError> { + let origin = WestendLocation::get(); + let assets: XcmAssets = (AssetId(WestendLocation::get()), 1_000 * UNITS).into(); + let ticket = Location { parents: 0, interior: Here }; + Ok((origin, ticket, assets)) + } + + fn fee_asset() -> Result { + Ok(Asset { + id: AssetId(WestendLocation::get()), + fun: Fungible(1_000 * UNITS), + }) + } + + fn unlockable_asset() -> Result<(Location, Location, Asset), BenchmarkError> { + Err(BenchmarkError::Skip) + } + + fn export_message_origin_and_destination( + ) -> Result<(Location, NetworkId, InteriorLocation), BenchmarkError> { + Err(BenchmarkError::Skip) + } + + fn alias_origin() -> Result<(Location, Location), BenchmarkError> { + // Any location can alias to an internal location. + // Here parachain 1001 aliases to an internal account. 
+ Ok(( + Location::new(1, [Parachain(1001)]), + Location::new(1, [Parachain(1001), AccountId32 { id: [111u8; 32], network: None }]), + )) + } + } + + type XcmBalances = pallet_xcm_benchmarks::fungible::Pallet::; + type XcmGeneric = pallet_xcm_benchmarks::generic::Pallet::; + + type Local = pallet_assets::Pallet::; + type Foreign = pallet_assets::Pallet::; + type Pool = pallet_assets::Pallet::; + + type ToRococo = XcmBridgeHubRouterBench; + + use frame_support::traits::WhitelistedStorageKeys; + let whitelist: Vec = AllPalletsWithSystem::whitelisted_storage_keys(); + + let mut batches = Vec::::new(); + let params = (&config, &whitelist); + add_benchmarks!(params, batches); + + Ok(batches) + } + } + + impl sp_genesis_builder::GenesisBuilder for Runtime { + fn build_state(config: Vec) -> sp_genesis_builder::Result { + let res = build_state::(config); + // tweak some of our parameter-types as well.. + match pallet_staking_async::ValidatorCount::::get() { + 500 => { + log::info!(target: "runtime", "detected a polkadot-like chain during `build_state`"); + // this is a polkadot-like chain + crate::staking::MaxElectingVoters::set(&22_500); + crate::staking::Pages::set(&32); + }, + 1000 => { + log::info!(target: "runtime", "detected a kusama-like chain during `build_state`"); + // this is a kusama-like chain + crate::staking::MaxElectingVoters::set(&12_500); + crate::staking::Pages::set(&16); + } + 10 => { + log::info!(target: "runtime", "detected a dev chain during `build_state`"); + // this is a dev-chain -- no change needed + }, + _ => { + panic!("Unrecognized validator count -- genesis-config and this block should match"); + } + } + + res + } + + fn get_preset(id: &Option) -> Option> { + get_preset::(id, &genesis_config_presets::get_preset) + } + + fn preset_names() -> Vec { + genesis_config_presets::preset_names() + } + } + + impl xcm_runtime_apis::trusted_query::TrustedQueryApi for Runtime { + fn is_trusted_reserve(asset: VersionedAsset, location: VersionedLocation) -> 
xcm_runtime_apis::trusted_query::XcmTrustedQueryResult { + PolkadotXcm::is_trusted_reserve(asset, location) + } + fn is_trusted_teleporter(asset: VersionedAsset, location: VersionedLocation) -> xcm_runtime_apis::trusted_query::XcmTrustedQueryResult { + PolkadotXcm::is_trusted_teleporter(asset, location) + } + } +} + +cumulus_pallet_parachain_system::register_validate_block! { + Runtime = Runtime, + BlockExecutor = cumulus_pallet_aura_ext::BlockExecutor::, +} + +parameter_types! { + // The deposit configuration for the singed migration. Specially if you want to allow any signed account to do the migration (see `SignedFilter`, these deposits should be high) + pub const MigrationSignedDepositPerItem: Balance = CENTS; + pub const MigrationSignedDepositBase: Balance = 2_000 * CENTS; + pub const MigrationMaxKeyLen: u32 = 512; +} + +impl pallet_state_trie_migration::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type RuntimeHoldReason = RuntimeHoldReason; + type SignedDepositPerItem = MigrationSignedDepositPerItem; + type SignedDepositBase = MigrationSignedDepositBase; + // An origin that can control the whole pallet: should be Root, or a part of your council. + type ControlOrigin = frame_system::EnsureSignedBy; + // specific account for the migration, can trigger the signed migrations. + type SignedFilter = frame_system::EnsureSignedBy; + + // Replace this with weight based on your runtime. + type WeightInfo = pallet_state_trie_migration::weights::SubstrateWeight; + + type MaxKeyLen = MigrationMaxKeyLen; +} + +frame_support::ord_parameter_types! 
{ + pub const MigController: AccountId = AccountId::from(hex_literal::hex!("8458ed39dc4b6f6c7255f7bc42be50c2967db126357c999d44e12ca7ac80dc52")); + pub const RootMigController: AccountId = AccountId::from(hex_literal::hex!("8458ed39dc4b6f6c7255f7bc42be50c2967db126357c999d44e12ca7ac80dc52")); +} + +#[test] +fn ensure_key_ss58() { + use frame_support::traits::SortedMembers; + use sp_core::crypto::Ss58Codec; + let acc = + AccountId::from_ss58check("5F4EbSkZz18X36xhbsjvDNs6NuZ82HyYtq5UiJ1h9SBHJXZD").unwrap(); + assert_eq!(acc, MigController::sorted_members()[0]); + let acc = + AccountId::from_ss58check("5F4EbSkZz18X36xhbsjvDNs6NuZ82HyYtq5UiJ1h9SBHJXZD").unwrap(); + assert_eq!(acc, RootMigController::sorted_members()[0]); +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/staking.rs b/substrate/frame/staking-async/runtimes/parachain/src/staking.rs new file mode 100644 index 0000000000000..86225a7402be0 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/staking.rs @@ -0,0 +1,475 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use core::marker::PhantomData; + +///! Staking, and election related pallet configurations. 
+use super::*; +use cumulus_primitives_core::relay_chain::SessionIndex; +use frame_election_provider_support::{ElectionDataProvider, SequentialPhragmen}; +use frame_support::traits::{ConstU128, EitherOf}; +use pallet_election_provider_multi_block::{ + self as multi_block, weights::measured, SolutionAccuracyOf, +}; +use pallet_staking_async::UseValidatorsMap; +use polkadot_runtime_common::{prod_or_fast, BalanceToU256, U256ToBalance}; +use sp_runtime::{ + transaction_validity::TransactionPriority, FixedPointNumber, FixedU128, SaturatedConversion, +}; + +parameter_types! { + pub storage SignedPhase: u32 = 3 * MINUTES; + pub storage UnsignedPhase: u32 = 1 * MINUTES; + pub storage SignedValidationPhase: u32 = Pages::get() + 1; + + /// Compatible with Polkadot, we allow up to 22_500 nominators to be considered for election + pub storage MaxElectingVoters: u32 = 2000; + + /// Maximum number of validators that we may want to elect. 1000 is the end target. + pub const MaxValidatorSet: u32 = 1000; + + /// Number of election pages that we operate upon. + pub storage Pages: u32 = 4; + + /// Number of nominators per page of the snapshot, and consequently number of backers in the solution. + pub VoterSnapshotPerBlock: u32 = MaxElectingVoters::get() / Pages::get(); + + /// Number of validators per page of the snapshot. + pub const TargetSnapshotPerBlock: u32 = MaxValidatorSet::get(); + + /// In each page, we may observe up to all of the validators. + pub const MaxWinnersPerPage: u32 = MaxValidatorSet::get(); + + /// In each page of the election, we allow up to all of the nominators of that page to be present. + pub MaxBackersPerWinner: u32 = VoterSnapshotPerBlock::get(); + + /// Total number of backers per winner across all pages. This is not used in the code yet. + pub MaxBackersPerWinnerFinal: u32 = MaxBackersPerWinner::get(); + + /// Size of the exposures. This should be small enough to make the reward payouts feasible. 
+ pub const MaxExposurePageSize: u32 = 64; + + /// Each solution is considered "better" if it is 0.01% better. + pub storage SolutionImprovementThreshold: Perbill = Perbill::from_rational(1u32, 10_000); +} + +frame_election_provider_support::generate_solution_type!( + #[compact] + pub struct NposCompactSolution16::< + VoterIndex = u16, + TargetIndex = u16, + Accuracy = sp_runtime::PerU16, + MaxVoters = VoterSnapshotPerBlock, + >(16) +); + +#[cfg(feature = "runtime-benchmarks")] +parameter_types! { + pub BenchElectionBounds: frame_election_provider_support::bounds::ElectionBounds = + frame_election_provider_support::bounds::ElectionBoundsBuilder::default().build(); +} + +#[cfg(feature = "runtime-benchmarks")] +pub struct OnChainConfig; + +#[cfg(feature = "runtime-benchmarks")] +impl frame_election_provider_support::onchain::Config for OnChainConfig { + // unbounded + type Bounds = BenchElectionBounds; + // We should not need sorting, as our bounds are large enough for the number of + // nominators/validators in this test setup. 
+ type Sort = ConstBool; + type DataProvider = Staking; + type MaxBackersPerWinner = MaxBackersPerWinner; + type MaxWinnersPerPage = MaxWinnersPerPage; + type Solver = frame_election_provider_support::SequentialPhragmen; + type System = Runtime; + type WeightInfo = (); +} + +impl multi_block::Config for Runtime { + type AreWeDone = multi_block::RevertToSignedIfNotQueuedOf; + type Pages = Pages; + type UnsignedPhase = UnsignedPhase; + type SignedPhase = SignedPhase; + type SignedValidationPhase = SignedValidationPhase; + type VoterSnapshotPerBlock = VoterSnapshotPerBlock; + type TargetSnapshotPerBlock = TargetSnapshotPerBlock; + type AdminOrigin = EnsureRoot; + type DataProvider = Staking; + #[cfg(not(feature = "runtime-benchmarks"))] + type Fallback = multi_block::Continue; + #[cfg(feature = "runtime-benchmarks")] + type Fallback = frame_election_provider_support::onchain::OnChainExecution; + type MinerConfig = Self; + type Verifier = MultiBlockVerifier; + type WeightInfo = measured::pallet_election_provider_multi_block::SubstrateWeight; +} + +impl multi_block::verifier::Config for Runtime { + type MaxWinnersPerPage = MaxWinnersPerPage; + type MaxBackersPerWinner = MaxBackersPerWinner; + type MaxBackersPerWinnerFinal = MaxBackersPerWinnerFinal; + type SolutionDataProvider = MultiBlockSigned; + type SolutionImprovementThreshold = SolutionImprovementThreshold; + type WeightInfo = + measured::pallet_election_provider_multi_block_verifier::SubstrateWeight; +} + +parameter_types! 
{ + pub BailoutGraceRatio: Perbill = Perbill::from_percent(50); + pub EjectGraceRatio: Perbill = Perbill::from_percent(50); + pub DepositBase: Balance = 5 * UNITS; + pub DepositPerPage: Balance = 1 * UNITS; + pub RewardBase: Balance = 10 * UNITS; + pub MaxSubmissions: u32 = 8; +} + +impl multi_block::signed::Config for Runtime { + type RuntimeHoldReason = RuntimeHoldReason; + type Currency = Balances; + type BailoutGraceRatio = BailoutGraceRatio; + type EjectGraceRatio = EjectGraceRatio; + type DepositBase = DepositBase; + type DepositPerPage = DepositPerPage; + type RewardBase = RewardBase; + type MaxSubmissions = MaxSubmissions; + type EstimateCallFee = TransactionPayment; + type WeightInfo = measured::pallet_election_provider_multi_block_signed::SubstrateWeight; +} + +parameter_types! { + /// Priority of the offchain miner transactions. + pub MinerTxPriority: TransactionPriority = TransactionPriority::max_value() / 2; + + /// 1 hour session, 15 minutes unsigned phase, 4 offchain executions. + pub OffchainRepeat: BlockNumber = UnsignedPhase::get() / 4; +} + +impl multi_block::unsigned::Config for Runtime { + type MinerPages = ConstU32<2>; + type OffchainSolver = SequentialPhragmen>; + type MinerTxPriority = MinerTxPriority; + type OffchainRepeat = OffchainRepeat; + type WeightInfo = + measured::pallet_election_provider_multi_block_unsigned::SubstrateWeight; +} + +parameter_types! { + /// Miner transaction can fill up to 75% of the block size. 
+ pub MinerMaxLength: u32 = Perbill::from_rational(75u32, 100) * + *RuntimeBlockLength::get() + .max + .get(DispatchClass::Normal); +} + +impl multi_block::unsigned::miner::MinerConfig for Runtime { + type AccountId = AccountId; + type Hash = Hash; + type MaxBackersPerWinner = ::MaxBackersPerWinner; + type MaxBackersPerWinnerFinal = + ::MaxBackersPerWinnerFinal; + type MaxWinnersPerPage = ::MaxWinnersPerPage; + type MaxVotesPerVoter = + <::DataProvider as ElectionDataProvider>::MaxVotesPerVoter; + type MaxLength = MinerMaxLength; + type Solver = ::OffchainSolver; + type Pages = Pages; + type Solution = NposCompactSolution16; + type VoterSnapshotPerBlock = ::VoterSnapshotPerBlock; + type TargetSnapshotPerBlock = ::TargetSnapshotPerBlock; +} + +parameter_types! { + pub const BagThresholds: &'static [u64] = &bag_thresholds::THRESHOLDS; +} + +type VoterBagsListInstance = pallet_bags_list::Instance1; +impl pallet_bags_list::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type ScoreProvider = Staking; + type WeightInfo = weights::pallet_bags_list::WeightInfo; + type BagThresholds = BagThresholds; + type Score = sp_npos_elections::VoteWeight; +} + +pub struct EraPayout; +impl pallet_staking_async::EraPayout for EraPayout { + fn era_payout( + _total_staked: Balance, + _total_issuance: Balance, + era_duration_millis: u64, + ) -> (Balance, Balance) { + const MILLISECONDS_PER_YEAR: u64 = (1000 * 3600 * 24 * 36525) / 100; + // A normal-sized era will have 1 / 365.25 here: + let relative_era_len = + FixedU128::from_rational(era_duration_millis.into(), MILLISECONDS_PER_YEAR.into()); + + // Fixed total TI that we use as baseline for the issuance. 
+ let fixed_total_issuance: i128 = 5_216_342_402_773_185_773; + let fixed_inflation_rate = FixedU128::from_rational(8, 100); + let yearly_emission = fixed_inflation_rate.saturating_mul_int(fixed_total_issuance); + + let era_emission = relative_era_len.saturating_mul_int(yearly_emission); + // 15% to treasury, as per Polkadot ref 1139. + let to_treasury = FixedU128::from_rational(15, 100).saturating_mul_int(era_emission); + let to_stakers = era_emission.saturating_sub(to_treasury); + + (to_stakers.saturated_into(), to_treasury.saturated_into()) + } +} + +parameter_types! { + // Six sessions in an era (6 hours). + pub const SessionsPerEra: SessionIndex = prod_or_fast!(6, 1); + // 2 eras for unbonding (12 hours). + pub const BondingDuration: sp_staking::EraIndex = 2; + // 1 era in which slashes can be cancelled (6 hours). + pub const SlashDeferDuration: sp_staking::EraIndex = 1; + // Note: this is not really correct as Max Nominators is (MaxExposurePageSize * page_count) but + // this is an unbounded number. We just set it to a reasonably high value, 1 full page + // of nominators. 
+ pub const MaxControllersInDeprecationBatch: u32 = 751; + pub const MaxNominations: u32 = ::LIMIT as u32; +} + +impl pallet_staking_async::Config for Runtime { + type Filter = (); + type OldCurrency = Balances; + type Currency = Balances; + type CurrencyBalance = Balance; + type RuntimeHoldReason = RuntimeHoldReason; + type CurrencyToVote = sp_staking::currency_to_vote::SaturatingCurrencyToVote; + type RewardRemainder = (); + type Slash = (); + type Reward = (); + type SessionsPerEra = SessionsPerEra; + type BondingDuration = BondingDuration; + type SlashDeferDuration = SlashDeferDuration; + type AdminOrigin = EitherOf, StakingAdmin>; + type EraPayout = EraPayout; + type MaxExposurePageSize = MaxExposurePageSize; + type ElectionProvider = MultiBlock; + type VoterList = VoterList; + type TargetList = UseValidatorsMap; + type MaxValidatorSet = MaxValidatorSet; + type NominationsQuota = pallet_staking_async::FixedNominationsQuota<{ MaxNominations::get() }>; + type MaxUnlockingChunks = frame_support::traits::ConstU32<32>; + type HistoryDepth = frame_support::traits::ConstU32<84>; + type MaxControllersInDeprecationBatch = MaxControllersInDeprecationBatch; + type EventListeners = (NominationPools, DelegatedStaking); + type WeightInfo = weights::pallet_staking_async::WeightInfo; + type MaxInvulnerables = frame_support::traits::ConstU32<20>; + type MaxDisabledValidators = ConstU32<100>; + type PlanningEraOffset = ConstU32<2>; + type RcClientInterface = StakingNextRcClient; +} + +impl pallet_staking_async_rc_client::Config for Runtime { + type RelayChainOrigin = EnsureRoot; + type AHStakingInterface = Staking; + type SendToRelayChain = XcmToRelayChain; +} + +#[derive(Encode, Decode)] +// Call indices taken from westend-next runtime. 
+pub enum RelayChainRuntimePallets { + #[codec(index = 67)] + AhClient(AhClientCalls), +} + +#[derive(Encode, Decode)] +pub enum AhClientCalls { + #[codec(index = 0)] + ValidatorSet(rc_client::ValidatorSetReport), +} + +use pallet_staking_async_rc_client as rc_client; +use xcm::latest::{prelude::*, SendXcm}; + +pub struct XcmToRelayChain(PhantomData); +impl rc_client::SendToRelayChain for XcmToRelayChain { + type AccountId = AccountId; + + /// Send a new validator set report to relay chain. + fn validator_set(report: rc_client::ValidatorSetReport) { + let message = Xcm(vec![ + Instruction::UnpaidExecution { + weight_limit: WeightLimit::Unlimited, + check_origin: None, + }, + Instruction::Transact { + origin_kind: OriginKind::Native, + fallback_max_weight: None, + call: RelayChainRuntimePallets::AhClient(AhClientCalls::ValidatorSet(report)) + .encode() + .into(), + }, + ]); + let dest = Location::parent(); + let result = send_xcm::(dest, message); + + match result { + Ok(_) => { + log::info!(target: "runtime", "Successfully sent validator set report to relay chain") + }, + Err(e) => { + log::error!(target: "runtime", "Failed to send validator set report to relay chain: {:?}", e) + }, + } + } +} + +impl pallet_fast_unstake::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type BatchSize = ConstU32<64>; + type Deposit = ConstU128<{ UNITS }>; + type ControlOrigin = EnsureRoot; + type Staking = Staking; + type MaxErasToCheckPerBlock = ConstU32<1>; + type WeightInfo = weights::pallet_fast_unstake::WeightInfo; +} + +parameter_types! 
{ + pub const PoolsPalletId: PalletId = PalletId(*b"py/nopls"); + pub const MaxPointsToBalance: u8 = 10; +} + +impl pallet_nomination_pools::Config for Runtime { + type Filter = (); + type RuntimeEvent = RuntimeEvent; + type WeightInfo = weights::pallet_nomination_pools::WeightInfo; + type Currency = Balances; + type RuntimeFreezeReason = RuntimeFreezeReason; + type RewardCounter = FixedU128; + type BalanceToU256 = BalanceToU256; + type U256ToBalance = U256ToBalance; + type StakeAdapter = + pallet_nomination_pools::adapter::DelegateStake; + type PostUnbondingPoolsWindow = ConstU32<4>; + type MaxMetadataLen = ConstU32<256>; + // we use the same number of allowed unlocking chunks as with staking. + type MaxUnbonding = ::MaxUnlockingChunks; + type PalletId = PoolsPalletId; + type MaxPointsToBalance = MaxPointsToBalance; + type AdminOrigin = EitherOf, StakingAdmin>; + type BlockNumberProvider = RelayChainBlockNumberProvider; +} + +parameter_types! { + pub const DelegatedStakingPalletId: PalletId = PalletId(*b"py/dlstk"); + pub const SlashRewardFraction: Perbill = Perbill::from_percent(1); +} + +impl pallet_delegated_staking::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type PalletId = DelegatedStakingPalletId; + type Currency = Balances; + type OnSlash = (); + type SlashRewardFraction = SlashRewardFraction; + type RuntimeHoldReason = RuntimeHoldReason; + type CoreStaking = Staking; +} + +/// The payload being signed in transactions. +pub type SignedPayload = generic::SignedPayload; +/// Unchecked extrinsic type as expected by this runtime. 
+pub type UncheckedExtrinsic = + generic::UncheckedExtrinsic; + +impl frame_system::offchain::SigningTypes for Runtime { + type Public = ::Signer; + type Signature = Signature; +} + +impl frame_system::offchain::CreateTransactionBase for Runtime +where + RuntimeCall: From, +{ + type RuntimeCall = RuntimeCall; + type Extrinsic = UncheckedExtrinsic; +} + +impl frame_system::offchain::CreateTransaction for Runtime +where + RuntimeCall: From, +{ + type Extension = TxExtension; + + fn create_transaction(call: RuntimeCall, extension: TxExtension) -> UncheckedExtrinsic { + UncheckedExtrinsic::new_transaction(call, extension) + } +} + +/// Submits a transaction with the node's public and signature type. Adheres to the signed extension +/// format of the chain. +impl frame_system::offchain::CreateSignedTransaction for Runtime +where + RuntimeCall: From, +{ + fn create_signed_transaction< + C: frame_system::offchain::AppCrypto, + >( + call: RuntimeCall, + public: ::Signer, + account: AccountId, + nonce: ::Nonce, + ) -> Option { + use sp_runtime::traits::StaticLookup; + // take the biggest period possible. + let period = + BlockHashCount::get().checked_next_power_of_two().map(|c| c / 2).unwrap_or(2) as u64; + + let current_block = System::block_number() + .saturated_into::() + // The `System::block_number` is initialized with `n+1`, + // so the actual block number is `n`. 
+ .saturating_sub(1); + let tip = 0; + let tx_ext = TxExtension::from(( + frame_system::CheckNonZeroSender::::new(), + frame_system::CheckSpecVersion::::new(), + frame_system::CheckTxVersion::::new(), + frame_system::CheckGenesis::::new(), + frame_system::CheckEra::::from(generic::Era::mortal(period, current_block)), + frame_system::CheckNonce::::from(nonce), + frame_system::CheckWeight::::new(), + pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::::from(tip, None), + frame_metadata_hash_extension::CheckMetadataHash::::new(true), + )); + let raw_payload = SignedPayload::new(call, tx_ext) + .map_err(|e| { + log::warn!("Unable to create signed payload: {:?}", e); + }) + .ok()?; + let signature = raw_payload.using_encoded(|payload| C::sign(payload, public))?; + let (call, tx_ext, _) = raw_payload.deconstruct(); + let address = ::Lookup::unlookup(account); + let transaction = UncheckedExtrinsic::new_signed(call, address, signature, tx_ext); + Some(transaction) + } +} + +impl frame_system::offchain::CreateInherent for Runtime +where + RuntimeCall: From, +{ + fn create_inherent(call: RuntimeCall) -> UncheckedExtrinsic { + UncheckedExtrinsic::new_bare(call) + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/block_weights.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/block_weights.rs new file mode 100644 index 0000000000000..524a7d21e7736 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/block_weights.rs @@ -0,0 +1,53 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod constants { + use frame_support::{ + parameter_types, + weights::{constants, Weight}, + }; + + parameter_types! { + /// Importing a block with 0 Extrinsics. + pub const BlockExecutionWeight: Weight = + Weight::from_parts(constants::WEIGHT_REF_TIME_PER_NANOS.saturating_mul(5_000_000), 0); + } + + #[cfg(test)] + mod test_weights { + use frame_support::weights::constants; + + /// Checks that the weight exists and is sane. + // NOTE: If this test fails but you are sure that the generated values are fine, + // you can delete it. + #[test] + fn sane() { + let w = super::constants::BlockExecutionWeight::get(); + + // At least 100 µs. + assert!( + w.ref_time() >= 100u64 * constants::WEIGHT_REF_TIME_PER_MICROS, + "Weight should be at least 100 µs." + ); + // At most 50 ms. + assert!( + w.ref_time() <= 50u64 * constants::WEIGHT_REF_TIME_PER_MILLIS, + "Weight should be at most 50 ms." + ); + } + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/cumulus_pallet_parachain_system.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/cumulus_pallet_parachain_system.rs new file mode 100644 index 0000000000000..b91921ce85eb1 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/cumulus_pallet_parachain_system.rs @@ -0,0 +1,82 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `cumulus_pallet_parachain_system` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-03-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("statemine-dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --chain +// statemine-dev +// --pallet +// cumulus_pallet_parachain_system +// --extrinsic +// * +// --execution +// wasm +// --wasm-execution +// compiled +// --output +// parachains/runtimes/assets/statemine/src/weights +// --steps +// 50 +// --repeat +// 20 + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `cumulus_pallet_parachain_system`. 
+pub struct WeightInfo(PhantomData); +impl cumulus_pallet_parachain_system::WeightInfo for WeightInfo { + /// Storage: ParachainSystem LastDmqMqcHead (r:1 w:1) + /// Proof Skipped: ParachainSystem LastDmqMqcHead (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: ParachainSystem ReservedDmpWeightOverride (r:1 w:0) + /// Proof Skipped: ParachainSystem ReservedDmpWeightOverride (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: ParachainSystem ProcessedDownwardMessages (r:0 w:1) + /// Proof Skipped: ParachainSystem ProcessedDownwardMessages (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: MessageQueue Pages (r:0 w:16) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// The range of component `n` is `[0, 1000]`. + fn enqueue_inbound_downward_messages(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `12` + // Estimated: `8013` + // Minimum execution time: 1_622_000 picoseconds. 
+ Weight::from_parts(1_709_000, 0) + .saturating_add(Weight::from_parts(0, 8013)) + // Standard Error: 22_138 + .saturating_add(Weight::from_parts(23_923_169, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/cumulus_pallet_weight_reclaim.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/cumulus_pallet_weight_reclaim.rs new file mode 100644 index 0000000000000..e0a6ad47f504e --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/cumulus_pallet_weight_reclaim.rs @@ -0,0 +1,68 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `cumulus_pallet_weight_reclaim` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-12-30, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ys-ssygq-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-next-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=cumulus_pallet_weight_reclaim +// --chain=asset-hub-next-westend-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-next-westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `cumulus_pallet_weight_reclaim`. +pub struct WeightInfo(PhantomData); +impl cumulus_pallet_weight_reclaim::WeightInfo for WeightInfo { + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + /// Storage: `System::ExtrinsicWeightReclaimed` (r:1 w:1) + /// Proof: `System::ExtrinsicWeightReclaimed` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::AllExtrinsicsLen` (r:1 w:0) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn storage_weight_reclaim() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1533` + // Minimum execution time: 7_470_000 picoseconds. 
+ Weight::from_parts(7_695_000, 0) + .saturating_add(Weight::from_parts(0, 1533)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/cumulus_pallet_xcmp_queue.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/cumulus_pallet_xcmp_queue.rs new file mode 100644 index 0000000000000..9c62f6ce8192f --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/cumulus_pallet_xcmp_queue.rs @@ -0,0 +1,139 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `cumulus_pallet_xcmp_queue` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-09-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `Olivers-MacBook-Pro.local`, CPU: `` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-next-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --pallet +// cumulus-pallet-xcmp-queue +// --chain +// asset-hub-next-westend-dev +// --output +// cumulus/parachains/runtimes/assets/asset-hub-next-westend/src/weights/cumulus_pallet_xcmp_queue.rs +// --extrinsic +// + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `cumulus_pallet_xcmp_queue`. +pub struct WeightInfo(PhantomData); +impl cumulus_pallet_xcmp_queue::WeightInfo for WeightInfo { + /// Storage: `XcmpQueue::QueueConfig` (r:1 w:1) + /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn set_config_with_u32() -> Weight { + // Proof Size summary in bytes: + // Measured: `76` + // Estimated: `1561` + // Minimum execution time: 6_000_000 picoseconds. + Weight::from_parts(6_000_000, 0) + .saturating_add(Weight::from_parts(0, 1561)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn enqueue_n_bytes_xcmp_message(_n: u32, ) -> Weight { + // todo: run ci benchmark + Weight::zero() + } + + fn enqueue_2_empty_xcmp_messages() -> Weight { + // todo: run ci benchmark + Weight::zero() + } + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn suspend_channel() -> Weight { + // Proof Size summary in bytes: + // Measured: `76` + // Estimated: `1561` + // Minimum execution time: 3_000_000 picoseconds. 
+ Weight::from_parts(4_000_000, 0) + .saturating_add(Weight::from_parts(0, 1561)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn resume_channel() -> Weight { + // Proof Size summary in bytes: + // Measured: `111` + // Estimated: `1596` + // Minimum execution time: 4_000_000 picoseconds. + Weight::from_parts(4_000_000, 0) + .saturating_add(Weight::from_parts(0, 1596)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn take_first_concatenated_xcm() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 44_000_000 picoseconds. + Weight::from_parts(45_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + /// Storage: `XcmpQueue::InboundXcmpMessages` (r:1 w:1) + /// Proof: `XcmpQueue::InboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) + /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: 
`MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn on_idle_good_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65747` + // Estimated: `69212` + // Minimum execution time: 66_000_000 picoseconds. + Weight::from_parts(68_000_000, 0) + .saturating_add(Weight::from_parts(0, 69212)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + fn on_idle_large_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65710` + // Estimated: `69175` + // Minimum execution time: 43_000_000 picoseconds. + Weight::from_parts(44_000_000, 0) + .saturating_add(Weight::from_parts(0, 69175)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/extrinsic_weights.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/extrinsic_weights.rs new file mode 100644 index 0000000000000..2e828f8df5ae6 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/extrinsic_weights.rs @@ -0,0 +1,53 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod constants { + use frame_support::{ + parameter_types, + weights::{constants, Weight}, + }; + + parameter_types! { + /// Executing a NO-OP `System::remarks` Extrinsic. + pub const ExtrinsicBaseWeight: Weight = + Weight::from_parts(constants::WEIGHT_REF_TIME_PER_NANOS.saturating_mul(125_000), 0); + } + + #[cfg(test)] + mod test_weights { + use frame_support::weights::constants; + + /// Checks that the weight exists and is sane. + // NOTE: If this test fails but you are sure that the generated values are fine, + // you can delete it. + #[test] + fn sane() { + let w = super::constants::ExtrinsicBaseWeight::get(); + + // At least 10 µs. + assert!( + w.ref_time() >= 10u64 * constants::WEIGHT_REF_TIME_PER_MICROS, + "Weight should be at least 10 µs." + ); + // At most 1 ms. + assert!( + w.ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, + "Weight should be at most 1 ms." + ); + } + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/frame_system.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/frame_system.rs new file mode 100644 index 0000000000000..b99b216eb5722 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/frame_system.rs @@ -0,0 +1,183 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `frame_system` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-next-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=asset-hub-next-westend-dev +// --wasm-execution=compiled +// --pallet=frame_system +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/assets/asset-hub-next-westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system`. +pub struct WeightInfo(PhantomData); +impl frame_system::WeightInfo for WeightInfo { + /// The range of component `b` is `[0, 3932160]`. + fn remark(b: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_105_000 picoseconds. 
+ Weight::from_parts(2_139_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 0 + .saturating_add(Weight::from_parts(388, 0).saturating_mul(b.into())) + } + /// The range of component `b` is `[0, 3932160]`. + fn remark_with_event(b: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_540_000 picoseconds. + Weight::from_parts(7_767_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 2 + .saturating_add(Weight::from_parts(1_730, 0).saturating_mul(b.into())) + } + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) + fn set_heap_pages() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `1485` + // Minimum execution time: 3_980_000 picoseconds. + Weight::from_parts(4_120_000, 0) + .saturating_add(Weight::from_parts(0, 1485)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) + /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpgradeRestrictionSignal` (r:1 w:0) + /// Proof: `ParachainSystem::UpgradeRestrictionSignal` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingValidationCode` (r:1 w:1) + /// Proof: `ParachainSystem::PendingValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::NewValidationCode` (r:0 w:1) + /// Proof: `ParachainSystem::NewValidationCode` 
(`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::DidSetValidationCode` (r:0 w:1) + /// Proof: `ParachainSystem::DidSetValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn set_code() -> Weight { + // Proof Size summary in bytes: + // Measured: `156` + // Estimated: `1641` + // Minimum execution time: 102_511_794_000 picoseconds. + Weight::from_parts(105_688_965_000, 0) + .saturating_add(Weight::from_parts(0, 1641)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `i` is `[0, 1000]`. + fn set_storage(i: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_241_000 picoseconds. + Weight::from_parts(2_329_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 2_336 + .saturating_add(Weight::from_parts(756_084, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) + } + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `i` is `[0, 1000]`. + fn kill_storage(i: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_233_000 picoseconds. + Weight::from_parts(2_295_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 990 + .saturating_add(Weight::from_parts(573_213, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) + } + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `p` is `[0, 1000]`. 
+ fn kill_prefix(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `83 + p * (69 ±0)` + // Estimated: `86 + p * (70 ±0)` + // Minimum execution time: 3_990_000 picoseconds. + Weight::from_parts(4_110_000, 0) + .saturating_add(Weight::from_parts(0, 86)) + // Standard Error: 1_782 + .saturating_add(Weight::from_parts(1_220_573, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) + .saturating_add(Weight::from_parts(0, 70).saturating_mul(p.into())) + } + /// Storage: `System::AuthorizedUpgrade` (r:0 w:1) + /// Proof: `System::AuthorizedUpgrade` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + fn authorize_upgrade() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 33_027_000 picoseconds. + Weight::from_parts(33_027_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::AuthorizedUpgrade` (r:1 w:1) + /// Proof: `System::AuthorizedUpgrade` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) + fn apply_authorized_upgrade() -> Weight { + // Proof Size summary in bytes: + // Measured: `22` + // Estimated: `1518` + // Minimum execution time: 118_101_992_000 picoseconds. 
+ Weight::from_parts(118_101_992_000, 0) + .saturating_add(Weight::from_parts(0, 1518)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(3)) + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/frame_system_extensions.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/frame_system_extensions.rs new file mode 100644 index 0000000000000..5e24bcd7e1512 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/frame_system_extensions.rs @@ -0,0 +1,154 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-10-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `697235d969a1`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/release/wbuild/asset-hub-next-westend-runtime/asset_hub_next_westend_runtime.wasm +// --pallet=frame_system_extensions +// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-next-westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. +pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `54` + // Estimated: `3509` + // Minimum execution time: 6_329_000 picoseconds. + Weight::from_parts(6_665_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_mortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 12_110_000 picoseconds. 
+ Weight::from_parts(12_883_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn check_mortality_immortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 12_241_000 picoseconds. + Weight::from_parts(12_780_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 825_000 picoseconds. + Weight::from_parts(890_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `101` + // Estimated: `3593` + // Minimum execution time: 10_159_000 picoseconds. + Weight::from_parts(10_461_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 578_000 picoseconds. + Weight::from_parts(660_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 618_000 picoseconds. 
+ Weight::from_parts(682_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::AllExtrinsicsLen` (r:1 w:1) + /// Proof: `System::AllExtrinsicsLen` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + /// Storage: `System::ExtrinsicWeightReclaimed` (r:1 w:1) + /// Proof: `System::ExtrinsicWeightReclaimed` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `24` + // Estimated: `1533` + // Minimum execution time: 9_964_000 picoseconds. + Weight::from_parts(10_419_000, 0) + .saturating_add(Weight::from_parts(0, 1533)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `System::ExtrinsicWeightReclaimed` (r:1 w:1) + /// Proof: `System::ExtrinsicWeightReclaimed` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + fn weight_reclaim() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `1533` + // Minimum execution time: 4_890_000 picoseconds. + Weight::from_parts(5_163_000, 0) + .saturating_add(Weight::from_parts(0, 1533)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/mod.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/mod.rs new file mode 100644 index 0000000000000..9344f5ecaa259 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/mod.rs @@ -0,0 +1,65 @@ +// This file is part of Substrate. 
+ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod block_weights; +pub mod cumulus_pallet_parachain_system; +pub mod cumulus_pallet_weight_reclaim; +pub mod cumulus_pallet_xcmp_queue; +pub mod extrinsic_weights; +pub mod frame_system; +pub mod frame_system_extensions; +pub mod pallet_asset_conversion; +pub mod pallet_asset_conversion_ops; +pub mod pallet_asset_conversion_tx_payment; +pub mod pallet_asset_rate; +pub mod pallet_asset_rewards; +pub mod pallet_assets_foreign; +pub mod pallet_assets_local; +pub mod pallet_assets_pool; +pub mod pallet_bags_list; +pub mod pallet_balances; +pub mod pallet_collator_selection; +pub mod pallet_conviction_voting; +pub mod pallet_fast_unstake; +pub mod pallet_message_queue; +pub mod pallet_migrations; +pub mod pallet_multisig; +pub mod pallet_nft_fractionalization; +pub mod pallet_nfts; +pub mod pallet_nomination_pools; +pub mod pallet_preimage; +pub mod pallet_proxy; +pub mod pallet_referenda; +pub mod pallet_scheduler; +pub mod pallet_session; +pub mod pallet_staking_async; +pub mod pallet_timestamp; +pub mod pallet_transaction_payment; +pub mod pallet_treasury; +pub mod pallet_uniques; +pub mod pallet_utility; +pub mod pallet_vesting; +pub mod pallet_whitelist; +pub mod pallet_xcm; +pub mod pallet_xcm_bridge_hub_router; +pub mod paritydb_weights; +pub mod rocksdb_weights; +pub mod xcm; + +pub use 
block_weights::constants::BlockExecutionWeight; +pub use extrinsic_weights::constants::ExtrinsicBaseWeight; +pub use rocksdb_weights::constants::RocksDbWeight; diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_asset_conversion.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_asset_conversion.rs new file mode 100644 index 0000000000000..9746cc58d6068 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_asset_conversion.rs @@ -0,0 +1,180 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_asset_conversion` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-10-30, STEPS: `20`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `cob`, CPU: `` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-next-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/debug/polkadot-parachain +// benchmark +// pallet +// --chain=asset-hub-next-westend-dev +// --steps=20 +// --repeat=2 +// --pallet=pallet-asset-conversion +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./cumulus/parachains/runtimes/assets/asset-hub-next-westend/src/weights/pallet_asset_conversion.rs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_asset_conversion`. +pub struct WeightInfo(PhantomData); +impl pallet_asset_conversion::WeightInfo for WeightInfo { + /// Storage: `AssetConversion::Pools` (r:1 w:1) + /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(1224), added: 3699, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Account` (r:1 w:1) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `AssetConversion::NextPoolAssetId` (r:1 w:1) + /// Proof: `AssetConversion::NextPoolAssetId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Account` (r:1 w:1) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + fn create_pool() 
-> Weight { + // Proof Size summary in bytes: + // Measured: `408` + // Estimated: `4689` + // Minimum execution time: 922_000_000 picoseconds. + Weight::from_parts(1_102_000_000, 0) + .saturating_add(Weight::from_parts(0, 4689)) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(7)) + } + /// Storage: `AssetConversion::Pools` (r:1 w:0) + /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(1224), added: 3699, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Account` (r:2 w:2) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Account` (r:2 w:2) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + fn add_liquidity() -> Weight { + // Proof Size summary in bytes: + // Measured: `1117` + // Estimated: `7404` + // Minimum execution time: 1_597_000_000 picoseconds. 
+ Weight::from_parts(1_655_000_000, 0) + .saturating_add(Weight::from_parts(0, 7404)) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(7)) + } + /// Storage: `AssetConversion::Pools` (r:1 w:0) + /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(1224), added: 3699, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Account` (r:2 w:2) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Account` (r:1 w:1) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + fn remove_liquidity() -> Weight { + // Proof Size summary in bytes: + // Measured: `1106` + // Estimated: `7404` + // Minimum execution time: 1_500_000_000 picoseconds. 
+ Weight::from_parts(1_633_000_000, 0) + .saturating_add(Weight::from_parts(0, 7404)) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(6)) + } + /// Storage: `ForeignAssets::Asset` (r:2 w:2) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Account` (r:4 w:4) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `n` is `[2, 3]`. + fn swap_exact_tokens_for_tokens(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0 + n * (557 ±0)` + // Estimated: `7404 + n * (393 ±92)` + // Minimum execution time: 930_000_000 picoseconds. + Weight::from_parts(960_000_000, 0) + .saturating_add(Weight::from_parts(0, 7404)) + // Standard Error: 17_993_720 + .saturating_add(Weight::from_parts(41_959_183, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(Weight::from_parts(0, 393).saturating_mul(n.into())) + } + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Asset` (r:2 w:2) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Account` (r:4 w:4) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + /// The range of component `n` is `[2, 3]`. 
+ fn swap_tokens_for_exact_tokens(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0 + n * (557 ±0)` + // Estimated: `7404 + n * (393 ±92)` + // Minimum execution time: 940_000_000 picoseconds. + Weight::from_parts(956_000_000, 0) + .saturating_add(Weight::from_parts(0, 7404)) + // Standard Error: 15_746_647 + .saturating_add(Weight::from_parts(39_193_877, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(Weight::from_parts(0, 393).saturating_mul(n.into())) + } + /// Storage: `AssetConversion::Pools` (r:1 w:0) + /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:2 w:2) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Account` (r:1 w:1) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 3]`. + fn touch(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1571` + // Estimated: `6360` + // Minimum execution time: 381_000_000 picoseconds. 
+ Weight::from_parts(398_540_909, 6360) + // Standard Error: 1_330_283 + .saturating_add(Weight::from_parts(209_463_636, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(7_u64)) + .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(n.into()))) + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_asset_conversion_ops.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_asset_conversion_ops.rs new file mode 100644 index 0000000000000..22fc37bfc8102 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_asset_conversion_ops.rs @@ -0,0 +1,72 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_asset_conversion_ops` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-02-15, STEPS: `10`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `cob`, CPU: `` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-next-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/debug/polkadot-parachain +// benchmark +// pallet +// --chain=asset-hub-next-westend-dev +// --steps=10 +// --repeat=2 +// --pallet=pallet-asset-conversion-ops +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./cumulus/parachains/runtimes/assets/asset-hub-next-westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_asset_conversion_ops`. +pub struct WeightInfo(PhantomData); +impl pallet_asset_conversion_ops::WeightInfo for WeightInfo { + /// Storage: `AssetConversion::Pools` (r:1 w:0) + /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(1224), added: 3699, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Account` (r:2 w:2) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Account` (r:2 w:2) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + fn migrate_to_new_account() -> Weight { + // Proof Size summary in bytes: + // Measured: `1105` + // Estimated: `7404` + // Minimum execution time: 2_216_000_000 picoseconds. 
+ Weight::from_parts(2_379_000_000, 0) + .saturating_add(Weight::from_parts(0, 7404)) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(8)) + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_asset_conversion_tx_payment.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_asset_conversion_tx_payment.rs new file mode 100644 index 0000000000000..7184731c7c6dd --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_asset_conversion_tx_payment.rs @@ -0,0 +1,93 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_asset_conversion_tx_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2024-01-04, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `Georges-MacBook-Pro.local`, CPU: `` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-next-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/debug/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=pallet_asset_conversion_tx_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-next-westend/src/weights/ +// --chain=asset-hub-next-westend-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_asset_conversion_tx_payment`. +pub struct WeightInfo(PhantomData); +impl pallet_asset_conversion_tx_payment::WeightInfo for WeightInfo { + fn charge_asset_tx_payment_zero() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 8_000_000 picoseconds. + Weight::from_parts(9_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_asset_tx_payment_native() -> Weight { + // Proof Size summary in bytes: + // Measured: `4` + // Estimated: `3593` + // Minimum execution time: 214_000_000 picoseconds. 
+ Weight::from_parts(219_000_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + } + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Account` (r:2 w:2) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_asset_tx_payment_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `631` + // Estimated: `7404` + // Minimum execution time: 1_211_000_000 picoseconds. + Weight::from_parts(1_243_000_000, 0) + .saturating_add(Weight::from_parts(0, 7404)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(4)) + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_asset_rate.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_asset_rate.rs new file mode 100644 index 0000000000000..b3137a78fac13 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_asset_rate.rs @@ -0,0 +1,87 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_asset_rate` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-04, STEPS: `50`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `cob`, CPU: `` +//! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: Some("polkadot-dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/debug/polkadot +// benchmark +// pallet +// --chain=polkadot-dev +// --steps=50 +// --repeat=2 +// --pallet=pallet_asset_rate +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./runtime/polkadot/src/weights/ +// --header=./file_header.txt + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_asset_rate`. +pub struct WeightInfo(PhantomData); +impl pallet_asset_rate::WeightInfo for WeightInfo { + /// Storage: AssetRate ConversionRateToNative (r:1 w:1) + /// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(1237), added: 3712, mode: MaxEncodedLen) + fn create() -> Weight { + // Proof Size summary in bytes: + // Measured: `42` + // Estimated: `4702` + // Minimum execution time: 67_000_000 picoseconds. 
+ Weight::from_parts(69_000_000, 0) + .saturating_add(Weight::from_parts(0, 4702)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: AssetRate ConversionRateToNative (r:1 w:1) + /// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(1237), added: 3712, mode: MaxEncodedLen) + fn update() -> Weight { + // Proof Size summary in bytes: + // Measured: `110` + // Estimated: `4702` + // Minimum execution time: 69_000_000 picoseconds. + Weight::from_parts(71_000_000, 0) + .saturating_add(Weight::from_parts(0, 4702)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: AssetRate ConversionRateToNative (r:1 w:1) + /// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(1237), added: 3712, mode: MaxEncodedLen) + fn remove() -> Weight { + // Proof Size summary in bytes: + // Measured: `110` + // Estimated: `4702` + // Minimum execution time: 70_000_000 picoseconds. + Weight::from_parts(90_000_000, 0) + .saturating_add(Weight::from_parts(0, 4702)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_asset_rewards.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_asset_rewards.rs new file mode 100644 index 0000000000000..b82f0215057c5 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_asset_rewards.rs @@ -0,0 +1,218 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_asset_rewards` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-01-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ys-ssygq-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-next-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_asset_rewards +// --chain=asset-hub-next-westend-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-next-westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_asset_rewards`. 
+pub struct WeightInfo(PhantomData); +impl pallet_asset_rewards::WeightInfo for WeightInfo { + /// Storage: `Assets::Asset` (r:2 w:0) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `AssetRewards::NextPoolId` (r:1 w:1) + /// Proof: `AssetRewards::NextPoolId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `MaxEncodedLen`) + /// Storage: `AssetRewards::PoolCost` (r:0 w:1) + /// Proof: `AssetRewards::PoolCost` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) + /// Storage: `AssetRewards::Pools` (r:0 w:1) + /// Proof: `AssetRewards::Pools` (`max_values`: None, `max_size`: Some(1344), added: 3819, mode: `MaxEncodedLen`) + fn create_pool() -> Weight { + // Proof Size summary in bytes: + // Measured: `392` + // Estimated: `6360` + // Minimum execution time: 60_734_000 picoseconds. 
+ Weight::from_parts(61_828_000, 0) + .saturating_add(Weight::from_parts(0, 6360)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `AssetRewards::Pools` (r:1 w:1) + /// Proof: `AssetRewards::Pools` (`max_values`: None, `max_size`: Some(1344), added: 3819, mode: `MaxEncodedLen`) + /// Storage: `AssetRewards::PoolStakers` (r:1 w:1) + /// Proof: `AssetRewards::PoolStakers` (`max_values`: None, `max_size`: Some(116), added: 2591, mode: `MaxEncodedLen`) + /// Storage: `AssetsFreezer::Freezes` (r:1 w:1) + /// Proof: `AssetsFreezer::Freezes` (`max_values`: None, `max_size`: Some(87), added: 2562, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:0) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `AssetsFreezer::FrozenBalances` (r:1 w:1) + /// Proof: `AssetsFreezer::FrozenBalances` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + fn stake() -> Weight { + // Proof Size summary in bytes: + // Measured: `906` + // Estimated: `4809` + // Minimum execution time: 56_014_000 picoseconds. 
+ Weight::from_parts(58_487_000, 0) + .saturating_add(Weight::from_parts(0, 4809)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `AssetRewards::Pools` (r:1 w:1) + /// Proof: `AssetRewards::Pools` (`max_values`: None, `max_size`: Some(1344), added: 3819, mode: `MaxEncodedLen`) + /// Storage: `AssetRewards::PoolStakers` (r:1 w:1) + /// Proof: `AssetRewards::PoolStakers` (`max_values`: None, `max_size`: Some(116), added: 2591, mode: `MaxEncodedLen`) + /// Storage: `AssetsFreezer::Freezes` (r:1 w:1) + /// Proof: `AssetsFreezer::Freezes` (`max_values`: None, `max_size`: Some(87), added: 2562, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:0) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `AssetsFreezer::FrozenBalances` (r:1 w:1) + /// Proof: `AssetsFreezer::FrozenBalances` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + fn unstake() -> Weight { + // Proof Size summary in bytes: + // Measured: `906` + // Estimated: `4809` + // Minimum execution time: 59_071_000 picoseconds. 
+ Weight::from_parts(60_631_000, 0) + .saturating_add(Weight::from_parts(0, 4809)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `AssetRewards::Pools` (r:1 w:0) + /// Proof: `AssetRewards::Pools` (`max_values`: None, `max_size`: Some(1344), added: 3819, mode: `MaxEncodedLen`) + /// Storage: `AssetRewards::PoolStakers` (r:1 w:1) + /// Proof: `AssetRewards::PoolStakers` (`max_values`: None, `max_size`: Some(116), added: 2591, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `AssetsFreezer::FrozenBalances` (r:1 w:0) + /// Proof: `AssetsFreezer::FrozenBalances` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + fn harvest_rewards() -> Weight { + // Proof Size summary in bytes: + // Measured: `1106` + // Estimated: `6208` + // Minimum execution time: 80_585_000 picoseconds. + Weight::from_parts(82_186_000, 0) + .saturating_add(Weight::from_parts(0, 6208)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `AssetRewards::Pools` (r:1 w:1) + /// Proof: `AssetRewards::Pools` (`max_values`: None, `max_size`: Some(1344), added: 3819, mode: `MaxEncodedLen`) + fn set_pool_reward_rate_per_block() -> Weight { + // Proof Size summary in bytes: + // Measured: `318` + // Estimated: `4809` + // Minimum execution time: 17_083_000 picoseconds. 
+ Weight::from_parts(17_816_000, 0) + .saturating_add(Weight::from_parts(0, 4809)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AssetRewards::Pools` (r:1 w:1) + /// Proof: `AssetRewards::Pools` (`max_values`: None, `max_size`: Some(1344), added: 3819, mode: `MaxEncodedLen`) + fn set_pool_admin() -> Weight { + // Proof Size summary in bytes: + // Measured: `318` + // Estimated: `4809` + // Minimum execution time: 15_269_000 picoseconds. + Weight::from_parts(15_881_000, 0) + .saturating_add(Weight::from_parts(0, 4809)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AssetRewards::Pools` (r:1 w:1) + /// Proof: `AssetRewards::Pools` (`max_values`: None, `max_size`: Some(1344), added: 3819, mode: `MaxEncodedLen`) + fn set_pool_expiry_block() -> Weight { + // Proof Size summary in bytes: + // Measured: `318` + // Estimated: `4809` + // Minimum execution time: 17_482_000 picoseconds. 
+ Weight::from_parts(18_124_000, 0) + .saturating_add(Weight::from_parts(0, 4809)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AssetRewards::Pools` (r:1 w:0) + /// Proof: `AssetRewards::Pools` (`max_values`: None, `max_size`: Some(1344), added: 3819, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `AssetsFreezer::FrozenBalances` (r:1 w:0) + /// Proof: `AssetsFreezer::FrozenBalances` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn deposit_reward_tokens() -> Weight { + // Proof Size summary in bytes: + // Measured: `781` + // Estimated: `6208` + // Minimum execution time: 66_644_000 picoseconds. 
+ Weight::from_parts(67_950_000, 0) + .saturating_add(Weight::from_parts(0, 6208)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `AssetRewards::Pools` (r:1 w:1) + /// Proof: `AssetRewards::Pools` (`max_values`: None, `max_size`: Some(1344), added: 3819, mode: `MaxEncodedLen`) + /// Storage: `AssetRewards::PoolStakers` (r:1 w:0) + /// Proof: `AssetRewards::PoolStakers` (`max_values`: None, `max_size`: Some(116), added: 2591, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `AssetsFreezer::FrozenBalances` (r:1 w:1) + /// Proof: `AssetsFreezer::FrozenBalances` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `AssetRewards::PoolCost` (r:1 w:1) + /// Proof: `AssetRewards::PoolCost` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(157), added: 2632, mode: `MaxEncodedLen`) + /// Storage: `AssetsFreezer::Freezes` (r:0 w:1) + /// Proof: `AssetsFreezer::Freezes` (`max_values`: None, `max_size`: Some(87), added: 2562, mode: `MaxEncodedLen`) + fn cleanup_pool() -> Weight { + // Proof Size summary in bytes: + // Measured: `1139` + // Estimated: `6208` + // Minimum execution time: 124_136_000 picoseconds. 
+ Weight::from_parts(128_642_000, 0) + .saturating_add(Weight::from_parts(0, 6208)) + .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().writes(10)) + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_assets_foreign.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_assets_foreign.rs new file mode 100644 index 0000000000000..483794c8cc6ea --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_assets_foreign.rs @@ -0,0 +1,552 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_assets` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-next-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=asset-hub-next-westend-dev +// --wasm-execution=compiled +// --pallet=pallet_assets +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/assets/asset-hub-next-westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_assets`. +pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> pallet_assets::WeightInfo for WeightInfo<T> { + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn create() -> Weight { + // Proof Size summary in bytes: + // Measured: `107` + // Estimated: `4273` + // Minimum execution time: 29_123_000 picoseconds. 
+ Weight::from_parts(30_025_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + fn force_create() -> Weight { + // Proof Size summary in bytes: + // Measured: `4` + // Estimated: `4273` + // Minimum execution time: 11_857_000 picoseconds. + Weight::from_parts(12_256_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + fn start_destroy() -> Weight { + // Proof Size summary in bytes: + // Measured: `276` + // Estimated: `4273` + // Minimum execution time: 14_513_000 picoseconds. + Weight::from_parts(15_110_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Account` (r:1001 w:1000) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1000 w:1000) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `c` is `[0, 1000]`. + /// The range of component `c` is `[0, 1000]`. + /// The range of component `c` is `[0, 1000]`. 
+ fn destroy_accounts(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0 + c * (208 ±0)` + // Estimated: `4273 + c * (3207 ±0)` + // Minimum execution time: 17_168_000 picoseconds. + Weight::from_parts(17_732_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + // Standard Error: 8_406 + .saturating_add(Weight::from_parts(15_274_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(c.into()))) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(c.into()))) + .saturating_add(Weight::from_parts(0, 3207).saturating_mul(c.into())) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Approvals` (r:1001 w:1000) + /// Proof: `ForeignAssets::Approvals` (`max_values`: None, `max_size`: Some(746), added: 3221, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 1000]`. + /// The range of component `a` is `[0, 1000]`. + /// The range of component `a` is `[0, 1000]`. + fn destroy_approvals(a: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `413 + a * (86 ±0)` + // Estimated: `4273 + a * (3221 ±0)` + // Minimum execution time: 18_111_000 picoseconds. 
+ Weight::from_parts(18_573_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + // Standard Error: 3_988 + .saturating_add(Weight::from_parts(15_270_030, 0).saturating_mul(a.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(a.into()))) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(a.into()))) + .saturating_add(Weight::from_parts(0, 3221).saturating_mul(a.into())) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Metadata` (r:1 w:0) + /// Proof: `ForeignAssets::Metadata` (`max_values`: None, `max_size`: Some(738), added: 3213, mode: `MaxEncodedLen`) + fn finish_destroy() -> Weight { + // Proof Size summary in bytes: + // Measured: `242` + // Estimated: `4273` + // Minimum execution time: 14_768_000 picoseconds. + Weight::from_parts(15_323_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Account` (r:1 w:1) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + fn mint() -> Weight { + // Proof Size summary in bytes: + // Measured: `242` + // Estimated: `4273` + // Minimum execution time: 25_855_000 picoseconds. 
+ Weight::from_parts(26_592_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Account` (r:1 w:1) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + fn burn() -> Weight { + // Proof Size summary in bytes: + // Measured: `350` + // Estimated: `4273` + // Minimum execution time: 33_065_000 picoseconds. + Weight::from_parts(34_113_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Account` (r:2 w:2) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `350` + // Estimated: `7404` + // Minimum execution time: 45_409_000 picoseconds. 
+ Weight::from_parts(46_176_000, 0) + .saturating_add(Weight::from_parts(0, 7404)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Account` (r:2 w:2) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn transfer_keep_alive() -> Weight { + // Proof Size summary in bytes: + // Measured: `350` + // Estimated: `7404` + // Minimum execution time: 40_017_000 picoseconds. + Weight::from_parts(41_081_000, 0) + .saturating_add(Weight::from_parts(0, 7404)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Account` (r:2 w:2) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn force_transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `350` + // Estimated: `7404` + // Minimum execution time: 45_189_000 picoseconds. 
+ Weight::from_parts(46_133_000, 0) + .saturating_add(Weight::from_parts(0, 7404)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:0) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Account` (r:1 w:1) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + fn freeze() -> Weight { + // Proof Size summary in bytes: + // Measured: `350` + // Estimated: `4273` + // Minimum execution time: 18_147_000 picoseconds. + Weight::from_parts(18_923_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:0) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Account` (r:1 w:1) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + fn thaw() -> Weight { + // Proof Size summary in bytes: + // Measured: `350` + // Estimated: `4273` + // Minimum execution time: 17_801_000 picoseconds. + Weight::from_parts(18_472_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + fn freeze_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `276` + // Estimated: `4273` + // Minimum execution time: 14_204_000 picoseconds. 
+ Weight::from_parts(14_671_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + fn thaw_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `276` + // Estimated: `4273` + // Minimum execution time: 13_752_000 picoseconds. + Weight::from_parts(14_380_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Metadata` (r:1 w:0) + /// Proof: `ForeignAssets::Metadata` (`max_values`: None, `max_size`: Some(738), added: 3213, mode: `MaxEncodedLen`) + fn transfer_ownership() -> Weight { + // Proof Size summary in bytes: + // Measured: `242` + // Estimated: `4273` + // Minimum execution time: 15_310_000 picoseconds. + Weight::from_parts(15_761_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + fn set_team() -> Weight { + // Proof Size summary in bytes: + // Measured: `242` + // Estimated: `4273` + // Minimum execution time: 13_656_000 picoseconds. 
+ Weight::from_parts(14_121_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:0) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Metadata` (r:1 w:1) + /// Proof: `ForeignAssets::Metadata` (`max_values`: None, `max_size`: Some(738), added: 3213, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 50]`. + /// The range of component `s` is `[0, 50]`. + /// The range of component `n` is `[0, 50]`. + /// The range of component `s` is `[0, 50]`. + /// The range of component `n` is `[0, 50]`. + /// The range of component `s` is `[0, 50]`. + fn set_metadata(n: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `242` + // Estimated: `4273` + // Minimum execution time: 28_413_000 picoseconds. + Weight::from_parts(29_399_881, 0) + .saturating_add(Weight::from_parts(0, 4273)) + // Standard Error: 369 + .saturating_add(Weight::from_parts(5_400, 0).saturating_mul(n.into())) + // Standard Error: 369 + .saturating_add(Weight::from_parts(3_525, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:0) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Metadata` (r:1 w:1) + /// Proof: `ForeignAssets::Metadata` (`max_values`: None, `max_size`: Some(738), added: 3213, mode: `MaxEncodedLen`) + fn clear_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `406` + // Estimated: `4273` + // Minimum execution time: 29_660_000 picoseconds. 
+ Weight::from_parts(30_281_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:0) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Metadata` (r:1 w:1) + /// Proof: `ForeignAssets::Metadata` (`max_values`: None, `max_size`: Some(738), added: 3213, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 50]`. + /// The range of component `s` is `[0, 50]`. + /// The range of component `n` is `[0, 50]`. + /// The range of component `s` is `[0, 50]`. + /// The range of component `n` is `[0, 50]`. + /// The range of component `s` is `[0, 50]`. + fn force_set_metadata(n: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `81` + // Estimated: `4273` + // Minimum execution time: 12_949_000 picoseconds. + Weight::from_parts(13_813_061, 0) + .saturating_add(Weight::from_parts(0, 4273)) + // Standard Error: 229 + .saturating_add(Weight::from_parts(480, 0).saturating_mul(n.into())) + // Standard Error: 229 + .saturating_add(Weight::from_parts(94, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:0) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Metadata` (r:1 w:1) + /// Proof: `ForeignAssets::Metadata` (`max_values`: None, `max_size`: Some(738), added: 3213, mode: `MaxEncodedLen`) + fn force_clear_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `406` + // Estimated: `4273` + // Minimum execution time: 29_002_000 picoseconds. 
+ Weight::from_parts(29_772_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + fn force_asset_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `242` + // Estimated: `4273` + // Minimum execution time: 13_023_000 picoseconds. + Weight::from_parts(13_528_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Approvals` (r:1 w:1) + /// Proof: `ForeignAssets::Approvals` (`max_values`: None, `max_size`: Some(746), added: 3221, mode: `MaxEncodedLen`) + fn approve_transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `276` + // Estimated: `4273` + // Minimum execution time: 32_393_000 picoseconds. 
+ Weight::from_parts(33_164_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Approvals` (r:1 w:1) + /// Proof: `ForeignAssets::Approvals` (`max_values`: None, `max_size`: Some(746), added: 3221, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Account` (r:2 w:2) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn transfer_approved() -> Weight { + // Proof Size summary in bytes: + // Measured: `520` + // Estimated: `7404` + // Minimum execution time: 64_647_000 picoseconds. + Weight::from_parts(65_669_000, 0) + .saturating_add(Weight::from_parts(0, 7404)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Approvals` (r:1 w:1) + /// Proof: `ForeignAssets::Approvals` (`max_values`: None, `max_size`: Some(746), added: 3221, mode: `MaxEncodedLen`) + fn cancel_approval() -> Weight { + // Proof Size summary in bytes: + // Measured: `446` + // Estimated: `4273` + // Minimum execution time: 34_292_000 picoseconds. 
+ Weight::from_parts(35_505_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Approvals` (r:1 w:1) + /// Proof: `ForeignAssets::Approvals` (`max_values`: None, `max_size`: Some(746), added: 3221, mode: `MaxEncodedLen`) + fn force_cancel_approval() -> Weight { + // Proof Size summary in bytes: + // Measured: `446` + // Estimated: `4273` + // Minimum execution time: 35_358_000 picoseconds. + Weight::from_parts(36_553_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + fn set_min_balance() -> Weight { + // Proof Size summary in bytes: + // Measured: `242` + // Estimated: `4273` + // Minimum execution time: 14_656_000 picoseconds. 
+ Weight::from_parts(15_097_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `ForeignAssets::Account` (r:1 w:1) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn touch() -> Weight { + // Proof Size summary in bytes: + // Measured: `345` + // Estimated: `4273` + // Minimum execution time: 33_758_000 picoseconds. + Weight::from_parts(34_618_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `ForeignAssets::Account` (r:1 w:1) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + fn touch_other() -> Weight { + // Proof Size summary in bytes: + // Measured: `242` + // Estimated: `4273` + // Minimum execution time: 32_205_000 picoseconds. 
+ Weight::from_parts(33_208_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `ForeignAssets::Account` (r:1 w:1) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn refund() -> Weight { + // Proof Size summary in bytes: + // Measured: `471` + // Estimated: `4273` + // Minimum execution time: 30_848_000 picoseconds. + Weight::from_parts(31_592_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `ForeignAssets::Account` (r:1 w:1) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Asset` (r:1 w:1) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + fn refund_other() -> Weight { + // Proof Size summary in bytes: + // Measured: `401` + // Estimated: `4273` + // Minimum execution time: 28_920_000 picoseconds. 
+ Weight::from_parts(29_519_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `ForeignAssets::Asset` (r:1 w:0) + /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) + /// Storage: `ForeignAssets::Account` (r:1 w:1) + /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) + fn block() -> Weight { + // Proof Size summary in bytes: + // Measured: `350` + // Estimated: `4273` + // Minimum execution time: 17_938_000 picoseconds. + Weight::from_parts(18_525_000, 0) + .saturating_add(Weight::from_parts(0, 4273)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + + fn transfer_all() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `3593` + // Minimum execution time: 46_573_000 picoseconds. + Weight::from_parts(47_385_000, 3593) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_assets_local.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_assets_local.rs new file mode 100644 index 0000000000000..026c7fb5668ea --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_assets_local.rs @@ -0,0 +1,550 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_assets` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-next-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=asset-hub-next-westend-dev +// --wasm-execution=compiled +// --pallet=pallet_assets +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/assets/asset-hub-next-westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_assets`. 
+pub struct WeightInfo(PhantomData); +impl pallet_assets::WeightInfo for WeightInfo { + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn create() -> Weight { + // Proof Size summary in bytes: + // Measured: `109` + // Estimated: `3675` + // Minimum execution time: 25_894_000 picoseconds. + Weight::from_parts(26_675_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + fn force_create() -> Weight { + // Proof Size summary in bytes: + // Measured: `6` + // Estimated: `3675` + // Minimum execution time: 10_155_000 picoseconds. + Weight::from_parts(10_864_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + fn start_destroy() -> Weight { + // Proof Size summary in bytes: + // Measured: `277` + // Estimated: `3675` + // Minimum execution time: 12_904_000 picoseconds. 
+ Weight::from_parts(13_723_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1001 w:1000) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1000 w:1000) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `c` is `[0, 1000]`. + /// The range of component `c` is `[0, 1000]`. + /// The range of component `c` is `[0, 1000]`. + fn destroy_accounts(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0 + c * (208 ±0)` + // Estimated: `3675 + c * (2609 ±0)` + // Minimum execution time: 15_522_000 picoseconds. + Weight::from_parts(16_015_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + // Standard Error: 7_984 + .saturating_add(Weight::from_parts(15_024_602, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(c.into()))) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(c.into()))) + .saturating_add(Weight::from_parts(0, 2609).saturating_mul(c.into())) + } + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Approvals` (r:1001 w:1000) + /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 1000]`. + /// The range of component `a` is `[0, 1000]`. + /// The range of component `a` is `[0, 1000]`. 
+ fn destroy_approvals(a: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `414 + a * (86 ±0)` + // Estimated: `3675 + a * (2623 ±0)` + // Minimum execution time: 16_570_000 picoseconds. + Weight::from_parts(16_940_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + // Standard Error: 4_030 + .saturating_add(Weight::from_parts(15_317_878, 0).saturating_mul(a.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(a.into()))) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(a.into()))) + .saturating_add(Weight::from_parts(0, 2623).saturating_mul(a.into())) + } + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Metadata` (r:1 w:0) + /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) + fn finish_destroy() -> Weight { + // Proof Size summary in bytes: + // Measured: `243` + // Estimated: `3675` + // Minimum execution time: 13_327_000 picoseconds. + Weight::from_parts(13_909_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + fn mint() -> Weight { + // Proof Size summary in bytes: + // Measured: `243` + // Estimated: `3675` + // Minimum execution time: 23_662_000 picoseconds. 
+ Weight::from_parts(24_510_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + fn burn() -> Weight { + // Proof Size summary in bytes: + // Measured: `351` + // Estimated: `3675` + // Minimum execution time: 30_903_000 picoseconds. + Weight::from_parts(31_725_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `351` + // Estimated: `6208` + // Minimum execution time: 42_163_000 picoseconds. 
+ Weight::from_parts(43_176_000, 0) + .saturating_add(Weight::from_parts(0, 6208)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn transfer_keep_alive() -> Weight { + // Proof Size summary in bytes: + // Measured: `351` + // Estimated: `6208` + // Minimum execution time: 36_812_000 picoseconds. + Weight::from_parts(37_836_000, 0) + .saturating_add(Weight::from_parts(0, 6208)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn force_transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `351` + // Estimated: `6208` + // Minimum execution time: 41_923_000 picoseconds. 
+ Weight::from_parts(43_200_000, 0) + .saturating_add(Weight::from_parts(0, 6208)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Assets::Asset` (r:1 w:0) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + fn freeze() -> Weight { + // Proof Size summary in bytes: + // Measured: `351` + // Estimated: `3675` + // Minimum execution time: 16_567_000 picoseconds. + Weight::from_parts(17_125_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Assets::Asset` (r:1 w:0) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + fn thaw() -> Weight { + // Proof Size summary in bytes: + // Measured: `351` + // Estimated: `3675` + // Minimum execution time: 16_271_000 picoseconds. + Weight::from_parts(17_116_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + fn freeze_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `277` + // Estimated: `3675` + // Minimum execution time: 12_772_000 picoseconds. 
+ Weight::from_parts(13_267_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + fn thaw_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `277` + // Estimated: `3675` + // Minimum execution time: 12_477_000 picoseconds. + Weight::from_parts(13_110_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Metadata` (r:1 w:0) + /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) + fn transfer_ownership() -> Weight { + // Proof Size summary in bytes: + // Measured: `243` + // Estimated: `3675` + // Minimum execution time: 13_857_000 picoseconds. + Weight::from_parts(14_270_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + fn set_team() -> Weight { + // Proof Size summary in bytes: + // Measured: `243` + // Estimated: `3675` + // Minimum execution time: 12_844_000 picoseconds. 
+ Weight::from_parts(13_215_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Assets::Asset` (r:1 w:0) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Metadata` (r:1 w:1) + /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 50]`. + /// The range of component `s` is `[0, 50]`. + /// The range of component `n` is `[0, 50]`. + /// The range of component `s` is `[0, 50]`. + /// The range of component `n` is `[0, 50]`. + /// The range of component `s` is `[0, 50]`. + fn set_metadata(n: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `243` + // Estimated: `3675` + // Minimum execution time: 27_149_000 picoseconds. + Weight::from_parts(28_147_817, 0) + .saturating_add(Weight::from_parts(0, 3675)) + // Standard Error: 410 + .saturating_add(Weight::from_parts(3_935, 0).saturating_mul(n.into())) + // Standard Error: 410 + .saturating_add(Weight::from_parts(2_686, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Assets::Asset` (r:1 w:0) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Metadata` (r:1 w:1) + /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) + fn clear_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `407` + // Estimated: `3675` + // Minimum execution time: 27_866_000 picoseconds. 
+ Weight::from_parts(28_735_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Assets::Asset` (r:1 w:0) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Metadata` (r:1 w:1) + /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 50]`. + /// The range of component `s` is `[0, 50]`. + /// The range of component `n` is `[0, 50]`. + /// The range of component `s` is `[0, 50]`. + /// The range of component `n` is `[0, 50]`. + /// The range of component `s` is `[0, 50]`. + fn force_set_metadata(n: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `82` + // Estimated: `3675` + // Minimum execution time: 11_877_000 picoseconds. + Weight::from_parts(12_700_940, 0) + .saturating_add(Weight::from_parts(0, 3675)) + // Standard Error: 219 + .saturating_add(Weight::from_parts(253, 0).saturating_mul(n.into())) + // Standard Error: 219 + .saturating_add(Weight::from_parts(1_004, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Assets::Asset` (r:1 w:0) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Metadata` (r:1 w:1) + /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) + fn force_clear_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `407` + // Estimated: `3675` + // Minimum execution time: 27_536_000 picoseconds. 
+ Weight::from_parts(28_635_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + fn force_asset_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `243` + // Estimated: `3675` + // Minimum execution time: 12_010_000 picoseconds. + Weight::from_parts(12_526_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Approvals` (r:1 w:1) + /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) + fn approve_transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `277` + // Estimated: `3675` + // Minimum execution time: 30_436_000 picoseconds. 
+ Weight::from_parts(31_420_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Approvals` (r:1 w:1) + /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn transfer_approved() -> Weight { + // Proof Size summary in bytes: + // Measured: `521` + // Estimated: `6208` + // Minimum execution time: 60_189_000 picoseconds. + Weight::from_parts(61_948_000, 0) + .saturating_add(Weight::from_parts(0, 6208)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Approvals` (r:1 w:1) + /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) + fn cancel_approval() -> Weight { + // Proof Size summary in bytes: + // Measured: `447` + // Estimated: `3675` + // Minimum execution time: 33_033_000 picoseconds. 
+ Weight::from_parts(33_710_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Approvals` (r:1 w:1) + /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) + fn force_cancel_approval() -> Weight { + // Proof Size summary in bytes: + // Measured: `447` + // Estimated: `3675` + // Minimum execution time: 33_121_000 picoseconds. + Weight::from_parts(34_112_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + fn set_min_balance() -> Weight { + // Proof Size summary in bytes: + // Measured: `243` + // Estimated: `3675` + // Minimum execution time: 12_994_000 picoseconds. + Weight::from_parts(13_442_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn touch() -> Weight { + // Proof Size summary in bytes: + // Measured: `346` + // Estimated: `3675` + // Minimum execution time: 31_950_000 picoseconds. 
+ Weight::from_parts(32_750_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + fn touch_other() -> Weight { + // Proof Size summary in bytes: + // Measured: `243` + // Estimated: `3675` + // Minimum execution time: 29_976_000 picoseconds. + Weight::from_parts(31_186_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn refund() -> Weight { + // Proof Size summary in bytes: + // Measured: `472` + // Estimated: `3675` + // Minimum execution time: 29_549_000 picoseconds. 
+ Weight::from_parts(30_533_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + fn refund_other() -> Weight { + // Proof Size summary in bytes: + // Measured: `402` + // Estimated: `3675` + // Minimum execution time: 27_746_000 picoseconds. + Weight::from_parts(28_561_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Assets::Asset` (r:1 w:0) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + fn block() -> Weight { + // Proof Size summary in bytes: + // Measured: `351` + // Estimated: `3675` + // Minimum execution time: 16_408_000 picoseconds. + Weight::from_parts(17_038_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + + fn transfer_all() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `3593` + // Minimum execution time: 46_573_000 picoseconds. 
+ Weight::from_parts(47_385_000, 3593) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_assets_pool.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_assets_pool.rs new file mode 100644 index 0000000000000..a09ece1b184f7 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_assets_pool.rs @@ -0,0 +1,544 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_assets` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-next-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=asset-hub-next-westend-dev +// --wasm-execution=compiled +// --pallet=pallet_assets +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/assets/asset-hub-next-westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_assets`. +pub struct WeightInfo(PhantomData); +impl pallet_assets::WeightInfo for WeightInfo { + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + fn create() -> Weight { + // Proof Size summary in bytes: + // Measured: `42` + // Estimated: `3675` + // Minimum execution time: 11_148_000 picoseconds. + Weight::from_parts(11_683_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + fn force_create() -> Weight { + // Proof Size summary in bytes: + // Measured: `42` + // Estimated: `3675` + // Minimum execution time: 10_811_000 picoseconds. 
+ Weight::from_parts(11_324_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + fn start_destroy() -> Weight { + // Proof Size summary in bytes: + // Measured: `314` + // Estimated: `3675` + // Minimum execution time: 13_360_000 picoseconds. + Weight::from_parts(13_961_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Account` (r:1001 w:1000) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1000 w:1000) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `c` is `[0, 1000]`. + /// The range of component `c` is `[0, 1000]`. + /// The range of component `c` is `[0, 1000]`. + fn destroy_accounts(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0 + c * (208 ±0)` + // Estimated: `3675 + c * (2609 ±0)` + // Minimum execution time: 16_162_000 picoseconds. 
+ Weight::from_parts(16_588_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + // Standard Error: 8_120 + .saturating_add(Weight::from_parts(14_997_923, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(c.into()))) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(c.into()))) + .saturating_add(Weight::from_parts(0, 2609).saturating_mul(c.into())) + } + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Approvals` (r:1001 w:1000) + /// Proof: `PoolAssets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 1000]`. + /// The range of component `a` is `[0, 1000]`. + /// The range of component `a` is `[0, 1000]`. + fn destroy_approvals(a: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `451 + a * (86 ±0)` + // Estimated: `3675 + a * (2623 ±0)` + // Minimum execution time: 17_013_000 picoseconds. 
+ Weight::from_parts(17_433_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + // Standard Error: 3_595 + .saturating_add(Weight::from_parts(5_514_723, 0).saturating_mul(a.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(a.into()))) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(a.into()))) + .saturating_add(Weight::from_parts(0, 2623).saturating_mul(a.into())) + } + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Metadata` (r:1 w:0) + /// Proof: `PoolAssets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) + fn finish_destroy() -> Weight { + // Proof Size summary in bytes: + // Measured: `280` + // Estimated: `3675` + // Minimum execution time: 13_565_000 picoseconds. + Weight::from_parts(14_080_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Account` (r:1 w:1) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + fn mint() -> Weight { + // Proof Size summary in bytes: + // Measured: `280` + // Estimated: `3675` + // Minimum execution time: 24_156_000 picoseconds. 
+ Weight::from_parts(24_879_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Account` (r:1 w:1) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + fn burn() -> Weight { + // Proof Size summary in bytes: + // Measured: `388` + // Estimated: `3675` + // Minimum execution time: 31_099_000 picoseconds. + Weight::from_parts(31_804_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Account` (r:2 w:2) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `388` + // Estimated: `6208` + // Minimum execution time: 42_337_000 picoseconds. 
+ Weight::from_parts(43_359_000, 0) + .saturating_add(Weight::from_parts(0, 6208)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Account` (r:2 w:2) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn transfer_keep_alive() -> Weight { + // Proof Size summary in bytes: + // Measured: `388` + // Estimated: `6208` + // Minimum execution time: 37_216_000 picoseconds. + Weight::from_parts(37_927_000, 0) + .saturating_add(Weight::from_parts(0, 6208)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Account` (r:2 w:2) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn force_transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `388` + // Estimated: `6208` + // Minimum execution time: 42_250_000 picoseconds. 
+ Weight::from_parts(43_145_000, 0) + .saturating_add(Weight::from_parts(0, 6208)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:0) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Account` (r:1 w:1) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + fn freeze() -> Weight { + // Proof Size summary in bytes: + // Measured: `388` + // Estimated: `3675` + // Minimum execution time: 16_897_000 picoseconds. + Weight::from_parts(17_424_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:0) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Account` (r:1 w:1) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + fn thaw() -> Weight { + // Proof Size summary in bytes: + // Measured: `388` + // Estimated: `3675` + // Minimum execution time: 16_804_000 picoseconds. + Weight::from_parts(17_335_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + fn freeze_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `314` + // Estimated: `3675` + // Minimum execution time: 13_195_000 picoseconds. 
+ Weight::from_parts(13_531_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + fn thaw_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `314` + // Estimated: `3675` + // Minimum execution time: 12_982_000 picoseconds. + Weight::from_parts(13_469_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Metadata` (r:1 w:0) + /// Proof: `PoolAssets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) + fn transfer_ownership() -> Weight { + // Proof Size summary in bytes: + // Measured: `280` + // Estimated: `3675` + // Minimum execution time: 14_275_000 picoseconds. + Weight::from_parts(14_696_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + fn set_team() -> Weight { + // Proof Size summary in bytes: + // Measured: `280` + // Estimated: `3675` + // Minimum execution time: 12_972_000 picoseconds. 
+ Weight::from_parts(13_459_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:0) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Metadata` (r:1 w:1) + /// Proof: `PoolAssets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 50]`. + /// The range of component `s` is `[0, 50]`. + /// The range of component `n` is `[0, 50]`. + /// The range of component `s` is `[0, 50]`. + /// The range of component `n` is `[0, 50]`. + /// The range of component `s` is `[0, 50]`. + fn set_metadata(n: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `280` + // Estimated: `3675` + // Minimum execution time: 15_092_000 picoseconds. + Weight::from_parts(15_929_556, 0) + .saturating_add(Weight::from_parts(0, 3675)) + // Standard Error: 289 + .saturating_add(Weight::from_parts(3_185, 0).saturating_mul(n.into())) + // Standard Error: 289 + .saturating_add(Weight::from_parts(1_709, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:0) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Metadata` (r:1 w:1) + /// Proof: `PoolAssets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) + fn clear_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `444` + // Estimated: `3675` + // Minimum execution time: 15_711_000 picoseconds. 
+ Weight::from_parts(16_183_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:0) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Metadata` (r:1 w:1) + /// Proof: `PoolAssets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 50]`. + /// The range of component `s` is `[0, 50]`. + /// The range of component `n` is `[0, 50]`. + /// The range of component `s` is `[0, 50]`. + /// The range of component `n` is `[0, 50]`. + /// The range of component `s` is `[0, 50]`. + fn force_set_metadata(n: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `119` + // Estimated: `3675` + // Minimum execution time: 13_288_000 picoseconds. + Weight::from_parts(14_061_633, 0) + .saturating_add(Weight::from_parts(0, 3675)) + // Standard Error: 215 + .saturating_add(Weight::from_parts(1_169, 0).saturating_mul(n.into())) + // Standard Error: 215 + .saturating_add(Weight::from_parts(900, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:0) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Metadata` (r:1 w:1) + /// Proof: `PoolAssets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) + fn force_clear_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `444` + // Estimated: `3675` + // Minimum execution time: 15_235_000 picoseconds. 
+ Weight::from_parts(15_998_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + fn force_asset_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `280` + // Estimated: `3675` + // Minimum execution time: 12_556_000 picoseconds. + Weight::from_parts(13_054_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Approvals` (r:1 w:1) + /// Proof: `PoolAssets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) + fn approve_transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `314` + // Estimated: `3675` + // Minimum execution time: 18_635_000 picoseconds. 
+ Weight::from_parts(19_431_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Approvals` (r:1 w:1) + /// Proof: `PoolAssets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Account` (r:2 w:2) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn transfer_approved() -> Weight { + // Proof Size summary in bytes: + // Measured: `558` + // Estimated: `6208` + // Minimum execution time: 49_082_000 picoseconds. + Weight::from_parts(50_414_000, 0) + .saturating_add(Weight::from_parts(0, 6208)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Approvals` (r:1 w:1) + /// Proof: `PoolAssets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) + fn cancel_approval() -> Weight { + // Proof Size summary in bytes: + // Measured: `484` + // Estimated: `3675` + // Minimum execution time: 20_978_000 picoseconds. 
+ Weight::from_parts(21_628_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Approvals` (r:1 w:1) + /// Proof: `PoolAssets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) + fn force_cancel_approval() -> Weight { + // Proof Size summary in bytes: + // Measured: `484` + // Estimated: `3675` + // Minimum execution time: 21_453_000 picoseconds. + Weight::from_parts(22_134_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + fn set_min_balance() -> Weight { + // Proof Size summary in bytes: + // Measured: `280` + // Estimated: `3675` + // Minimum execution time: 13_390_000 picoseconds. + Weight::from_parts(13_920_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PoolAssets::Account` (r:1 w:1) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + fn touch() -> Weight { + // Proof Size summary in bytes: + // Measured: `280` + // Estimated: `3675` + // Minimum execution time: 18_063_000 picoseconds. 
+ Weight::from_parts(18_669_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PoolAssets::Account` (r:1 w:1) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + fn touch_other() -> Weight { + // Proof Size summary in bytes: + // Measured: `280` + // Estimated: `3675` + // Minimum execution time: 17_949_000 picoseconds. + Weight::from_parts(18_891_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PoolAssets::Account` (r:1 w:1) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + fn refund() -> Weight { + // Proof Size summary in bytes: + // Measured: `406` + // Estimated: `3675` + // Minimum execution time: 14_696_000 picoseconds. + Weight::from_parts(15_295_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PoolAssets::Account` (r:1 w:1) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + fn refund_other() -> Weight { + // Proof Size summary in bytes: + // Measured: `439` + // Estimated: `3675` + // Minimum execution time: 14_643_000 picoseconds. 
+ Weight::from_parts(15_289_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PoolAssets::Asset` (r:1 w:0) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Account` (r:1 w:1) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + fn block() -> Weight { + // Proof Size summary in bytes: + // Measured: `388` + // Estimated: `3675` + // Minimum execution time: 16_619_000 picoseconds. + Weight::from_parts(17_279_000, 0) + .saturating_add(Weight::from_parts(0, 3675)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + + fn transfer_all() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `3593` + // Minimum execution time: 46_573_000 picoseconds. + Weight::from_parts(47_385_000, 3593) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_bags_list.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_bags_list.rs new file mode 100644 index 0000000000000..ba5f933a43024 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_bags_list.rs @@ -0,0 +1,110 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_bags_list` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-06-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner--ss9ysm1-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot +// benchmark +// pallet +// --chain=westend-dev +// --steps=50 +// --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_bags_list +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --header=./file_header.txt +// --output=./runtime/westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_bags_list`. 
+pub struct WeightInfo(PhantomData); +impl pallet_bags_list::WeightInfo for WeightInfo { + /// Storage: Staking Bonded (r:1 w:0) + /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) + /// Storage: Staking Ledger (r:1 w:0) + /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) + /// Storage: VoterList ListNodes (r:4 w:4) + /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) + /// Storage: VoterList ListBags (r:1 w:1) + /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) + fn rebag_non_terminal() -> Weight { + // Proof Size summary in bytes: + // Measured: `1656` + // Estimated: `11506` + // Minimum execution time: 60_240_000 picoseconds. + Weight::from_parts(62_834_000, 0) + .saturating_add(Weight::from_parts(0, 11506)) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: Staking Bonded (r:1 w:0) + /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) + /// Storage: Staking Ledger (r:1 w:0) + /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) + /// Storage: VoterList ListNodes (r:3 w:3) + /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) + /// Storage: VoterList ListBags (r:2 w:2) + /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) + fn rebag_terminal() -> Weight { + // Proof Size summary in bytes: + // Measured: `1550` + // Estimated: `8877` + // Minimum execution time: 59_084_000 picoseconds. 
+ Weight::from_parts(60_589_000, 0) + .saturating_add(Weight::from_parts(0, 8877)) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: VoterList ListNodes (r:4 w:4) + /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) + /// Storage: Staking Bonded (r:2 w:0) + /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) + /// Storage: Staking Ledger (r:2 w:0) + /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) + /// Storage: VoterList CounterForListNodes (r:1 w:1) + /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: VoterList ListBags (r:1 w:1) + /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) + fn put_in_front_of() -> Weight { + // Proof Size summary in bytes: + // Measured: `1861` + // Estimated: `11506` + // Minimum execution time: 65_945_000 picoseconds. + Weight::from_parts(67_429_000, 0) + .saturating_add(Weight::from_parts(0, 11506)) + .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().writes(6)) + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_balances.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_balances.rs new file mode 100644 index 0000000000000..0a1d0869e990a --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_balances.rs @@ -0,0 +1,179 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_balances` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-05-06, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-unxyhko3-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-next-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_balances +// --chain=asset-hub-next-westend-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-next-westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_balances`. +pub struct WeightInfo(PhantomData); +impl pallet_balances::WeightInfo for WeightInfo { + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn transfer_allow_death() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `3593` + // Minimum execution time: 45_289_000 picoseconds. 
+ Weight::from_parts(46_764_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn transfer_keep_alive() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `3593` + // Minimum execution time: 35_052_000 picoseconds. + Weight::from_parts(36_494_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn force_set_balance_creating() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `3593` + // Minimum execution time: 12_361_000 picoseconds. + Weight::from_parts(12_668_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn force_set_balance_killing() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `3593` + // Minimum execution time: 17_253_000 picoseconds. 
+ Weight::from_parts(17_733_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn force_transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `6196` + // Minimum execution time: 45_674_000 picoseconds. + Weight::from_parts(47_981_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn transfer_all() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `3593` + // Minimum execution time: 45_021_000 picoseconds. + Weight::from_parts(46_292_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn force_unreserve() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `3593` + // Minimum execution time: 15_071_000 picoseconds. + Weight::from_parts(15_406_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:999 w:999) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `u` is `[1, 1000]`. 
+ fn upgrade_accounts(u: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0 + u * (136 ±0)` + // Estimated: `990 + u * (2603 ±0)` + // Minimum execution time: 14_779_000 picoseconds. + Weight::from_parts(15_129_000, 0) + .saturating_add(Weight::from_parts(0, 990)) + // Standard Error: 10_629 + .saturating_add(Weight::from_parts(13_558_995, 0).saturating_mul(u.into())) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(u.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(u.into()))) + .saturating_add(Weight::from_parts(0, 2603).saturating_mul(u.into())) + } + /// Storage: `Balances::InactiveIssuance` (r:1 w:0) + /// Proof: `Balances::InactiveIssuance` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + fn force_adjust_total_issuance() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `1501` + // Minimum execution time: 5_274_000 picoseconds. + Weight::from_parts(5_727_000, 0) + .saturating_add(Weight::from_parts(0, 1501)) + .saturating_add(T::DbWeight::get().reads(1)) + } + fn burn_allow_death() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 28_088_000 picoseconds. + Weight::from_parts(28_980_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn burn_keep_alive() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 19_002_000 picoseconds. 
+ Weight::from_parts(19_480_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_collator_selection.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_collator_selection.rs new file mode 100644 index 0000000000000..432be72274343 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_collator_selection.rs @@ -0,0 +1,250 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_collator_selection` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-next-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=asset-hub-next-westend-dev +// --wasm-execution=compiled +// --pallet=pallet_collator_selection +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/assets/asset-hub-next-westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_collator_selection`. +pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> pallet_collator_selection::WeightInfo for WeightInfo<T> { + /// Storage: `Session::NextKeys` (r:20 w:0) + /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `CollatorSelection::Invulnerables` (r:0 w:1) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// The range of component `b` is `[1, 20]`. + fn set_invulnerables(b: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `163 + b * (79 ±0)` + // Estimated: `1154 + b * (2555 ±0)` + // Minimum execution time: 14_105_000 picoseconds. 
+ Weight::from_parts(12_034_824, 0) + .saturating_add(Weight::from_parts(0, 1154)) + // Standard Error: 7_023 + .saturating_add(Weight::from_parts(3_121_830, 0).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(Weight::from_parts(0, 2555).saturating_mul(b.into())) + } + /// Storage: `Session::NextKeys` (r:1 w:0) + /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `CollatorSelection::Invulnerables` (r:1 w:1) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::Candidates` (r:1 w:1) + /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `b` is `[1, 19]`. + /// The range of component `c` is `[1, 99]`. + fn add_invulnerable(b: u32, c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `756 + b * (32 ±0) + c * (53 ±0)` + // Estimated: `6287 + b * (37 ±0) + c * (53 ±0)` + // Minimum execution time: 47_466_000 picoseconds. 
+ Weight::from_parts(42_189_027, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 14_224 + .saturating_add(Weight::from_parts(291_155, 0).saturating_mul(b.into())) + // Standard Error: 2_696 + .saturating_add(Weight::from_parts(233_090, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(Weight::from_parts(0, 37).saturating_mul(b.into())) + .saturating_add(Weight::from_parts(0, 53).saturating_mul(c.into())) + } + /// Storage: `CollatorSelection::Candidates` (r:1 w:0) + /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::Invulnerables` (r:1 w:1) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// The range of component `b` is `[5, 20]`. + fn remove_invulnerable(b: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `119 + b * (32 ±0)` + // Estimated: `6287` + // Minimum execution time: 15_278_000 picoseconds. + Weight::from_parts(15_424_907, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 4_281 + .saturating_add(Weight::from_parts(197_354, 0).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `CollatorSelection::DesiredCandidates` (r:0 w:1) + /// Proof: `CollatorSelection::DesiredCandidates` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn set_desired_candidates() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_170_000 picoseconds. 
+ Weight::from_parts(7_455_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `CollatorSelection::CandidacyBond` (r:0 w:1) + /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 6_969_000 picoseconds. + Weight::from_parts(7_350_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `CollatorSelection::Candidates` (r:1 w:1) + /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::DesiredCandidates` (r:1 w:0) + /// Proof: `CollatorSelection::DesiredCandidates` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// Storage: `Session::NextKeys` (r:1 w:0) + /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `CollatorSelection::CandidacyBond` (r:1 w:0) + /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) + /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + /// The range of component `c` is `[1, 99]`. + fn register_as_candidate(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `736 + c * (52 ±0)` + // Estimated: `6287 + c * (54 ±0)` + // Minimum execution time: 40_783_000 picoseconds. 
+ Weight::from_parts(43_731_825, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_328 + .saturating_add(Weight::from_parts(232_983, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(Weight::from_parts(0, 54).saturating_mul(c.into())) + } + /// Storage: `CollatorSelection::Candidates` (r:1 w:1) + /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) + /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + /// The range of component `c` is `[4, 100]`. + fn leave_intent(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 32_537_000 picoseconds. + Weight::from_parts(34_922_361, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_494 + .saturating_add(Weight::from_parts(199_859, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + fn update_bond(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. 
+ Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + fn take_candidate_slot(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. + Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) + /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn note_author() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `6196` + // Minimum execution time: 43_240_000 picoseconds. 
+ Weight::from_parts(44_434_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `CollatorSelection::Candidates` (r:1 w:0) + /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LastAuthoredBlock` (r:100 w:0) + /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:97 w:97) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `r` is `[1, 100]`. + /// The range of component `c` is `[1, 100]`. + fn new_session(r: u32, c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `2243 + c * (97 ±0) + r * (112 ±0)` + // Estimated: `6287 + c * (2519 ±0) + r * (2603 ±0)` + // Minimum execution time: 16_841_000 picoseconds. 
+ Weight::from_parts(17_460_000, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 347_803 + .saturating_add(Weight::from_parts(15_008_101, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(c.into()))) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(c.into()))) + .saturating_add(Weight::from_parts(0, 2519).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(0, 2603).saturating_mul(r.into())) + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_conviction_voting.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_conviction_voting.rs new file mode 100644 index 0000000000000..31dd00e5c39cb --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_conviction_voting.rs @@ -0,0 +1,195 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_conviction_voting` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! 
HOSTNAME: `runner-xerhrdyb-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: `Some(Wasm)`, WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --pallet=pallet_conviction_voting +// --chain=westend-dev +// --header=./file_header.txt +// --output=./runtime/westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_conviction_voting`. +pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> pallet_conviction_voting::WeightInfo for WeightInfo<T> { + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(311), added: 2786, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn 
vote_new() -> Weight { + // Proof Size summary in bytes: + // Measured: `13445` + // Estimated: `42428` + // Minimum execution time: 152_223_000 picoseconds. + Weight::from_parts(162_148_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(311), added: 2786, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn vote_existing() -> Weight { + // Proof Size summary in bytes: + // Measured: `14166` + // Estimated: `83866` + // Minimum execution time: 220_361_000 picoseconds. 
+ Weight::from_parts(236_478_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(6)) + } + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn remove_vote() -> Weight { + // Proof Size summary in bytes: + // Measured: `13918` + // Estimated: `83866` + // Minimum execution time: 198_787_000 picoseconds. + Weight::from_parts(204_983_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + fn remove_other_vote() -> Weight { + // Proof Size summary in bytes: + // Measured: `13004` + // Estimated: `30706` + // Minimum execution time: 88_469_000 picoseconds. 
+ Weight::from_parts(95_942_000, 0) + .saturating_add(Weight::from_parts(0, 30706)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `ConvictionVoting::VotingFor` (r:2 w:2) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:512 w:512) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(311), added: 2786, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// The range of component `r` is `[0, 512]`. + fn delegate(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `29640 + r * (365 ±0)` + // Estimated: `83866 + r * (3411 ±0)` + // Minimum execution time: 79_951_000 picoseconds. 
+ Weight::from_parts(1_844_983_097, 0) + .saturating_add(Weight::from_parts(0, 83866)) + // Standard Error: 160_158 + .saturating_add(Weight::from_parts(43_973_863, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().writes(6)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 3411).saturating_mul(r.into())) + } + /// Storage: `ConvictionVoting::VotingFor` (r:2 w:2) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:512 w:512) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// The range of component `r` is `[0, 512]`. + fn undelegate(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `29555 + r * (365 ±0)` + // Estimated: `83866 + r * (3411 ±0)` + // Minimum execution time: 47_976_000 picoseconds. 
+ Weight::from_parts(1_877_857_335, 0) + .saturating_add(Weight::from_parts(0, 83866)) + // Standard Error: 168_477 + .saturating_add(Weight::from_parts(43_303_902, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 3411).saturating_mul(r.into())) + } + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(311), added: 2786, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + fn unlock() -> Weight { + // Proof Size summary in bytes: + // Measured: `12218` + // Estimated: `30706` + // Minimum execution time: 102_868_000 picoseconds. + Weight::from_parts(110_438_000, 0) + .saturating_add(Weight::from_parts(0, 30706)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_fast_unstake.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_fast_unstake.rs new file mode 100644 index 0000000000000..bd492ea831b0d --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_fast_unstake.rs @@ -0,0 +1,204 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_fast_unstake` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-06-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner--ss9ysm1-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot +// benchmark +// pallet +// --chain=westend-dev +// --steps=50 +// --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_fast_unstake +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --header=./file_header.txt +// --output=./runtime/westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_fast_unstake`. 
+pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> pallet_fast_unstake::WeightInfo for WeightInfo<T> { + /// Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) + /// Proof: FastUnstake ErasToCheckPerBlock (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: Staking ValidatorCount (r:1 w:0) + /// Proof: Staking ValidatorCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: FastUnstake Head (r:1 w:1) + /// Proof: FastUnstake Head (max_values: Some(1), max_size: Some(3087), added: 3582, mode: MaxEncodedLen) + /// Storage: FastUnstake CounterForQueue (r:1 w:0) + /// Proof: FastUnstake CounterForQueue (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) + /// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: Staking CurrentEra (r:1 w:0) + /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: Staking SlashingSpans (r:64 w:0) + /// Proof Skipped: Staking SlashingSpans (max_values: None, max_size: None, mode: Measured) + /// Storage: Staking Bonded (r:64 w:64) + /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) + /// Storage: Staking Validators (r:64 w:0) + /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) + /// Storage: Staking Nominators (r:64 w:0) + /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) + /// Storage: System Account (r:64 w:64) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: Balances Locks (r:64 w:64) + /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:64 w:0) + /// 
Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: Staking Ledger (r:0 w:64) + /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) + /// Storage: Staking Payee (r:0 w:64) + /// Proof: Staking Payee (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) + /// The range of component `b` is `[1, 64]`. + fn on_idle_unstake(b: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1090 + b * (344 ±0)` + // Estimated: `4572 + b * (3774 ±0)` + // Minimum execution time: 88_455_000 picoseconds. + Weight::from_parts(4_625_058, 0) + .saturating_add(Weight::from_parts(0, 4572)) + // Standard Error: 92_258 + .saturating_add(Weight::from_parts(61_451_756, 0).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().reads((7_u64).saturating_mul(b.into()))) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(T::DbWeight::get().writes((5_u64).saturating_mul(b.into()))) + .saturating_add(Weight::from_parts(0, 3774).saturating_mul(b.into())) + } + /// Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) + /// Proof: FastUnstake ErasToCheckPerBlock (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: Staking ValidatorCount (r:1 w:0) + /// Proof: Staking ValidatorCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: FastUnstake Head (r:1 w:1) + /// Proof: FastUnstake Head (max_values: Some(1), max_size: Some(3087), added: 3582, mode: MaxEncodedLen) + /// Storage: FastUnstake CounterForQueue (r:1 w:0) + /// Proof: FastUnstake CounterForQueue (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) + /// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: 
Staking CurrentEra (r:1 w:0) + /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: Staking ErasStakers (r:257 w:0) + /// Proof Skipped: Staking ErasStakers (max_values: None, max_size: None, mode: Measured) + /// The range of component `v` is `[1, 256]`. + /// The range of component `b` is `[1, 64]`. + fn on_idle_check(v: u32, b: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1304 + b * (48 ±0) + v * (2485 ±0)` + // Estimated: `4622 + b * (49 ±0) + v * (4961 ±0)` + // Minimum execution time: 737_381_000 picoseconds. + Weight::from_parts(747_714_000, 0) + .saturating_add(Weight::from_parts(0, 4622)) + // Standard Error: 4_194_752 + .saturating_add(Weight::from_parts(135_818_708, 0).saturating_mul(v.into())) + // Standard Error: 16_783_682 + .saturating_add(Weight::from_parts(525_457_699, 0).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(v.into()))) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(Weight::from_parts(0, 49).saturating_mul(b.into())) + .saturating_add(Weight::from_parts(0, 4961).saturating_mul(v.into())) + } + /// Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) + /// Proof: FastUnstake ErasToCheckPerBlock (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: Staking Ledger (r:1 w:1) + /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) + /// Storage: FastUnstake Queue (r:1 w:1) + /// Proof: FastUnstake Queue (max_values: None, max_size: Some(56), added: 2531, mode: MaxEncodedLen) + /// Storage: FastUnstake Head (r:1 w:0) + /// Proof: FastUnstake Head (max_values: Some(1), max_size: Some(3087), added: 3582, mode: MaxEncodedLen) + /// Storage: Staking Bonded (r:1 w:0) + /// Proof: Staking Bonded (max_values: None, max_size: Some(72), added: 2547, mode: MaxEncodedLen) + /// 
Storage: Staking Validators (r:1 w:0) + /// Proof: Staking Validators (max_values: None, max_size: Some(45), added: 2520, mode: MaxEncodedLen) + /// Storage: Staking Nominators (r:1 w:1) + /// Proof: Staking Nominators (max_values: None, max_size: Some(558), added: 3033, mode: MaxEncodedLen) + /// Storage: Staking CounterForNominators (r:1 w:1) + /// Proof: Staking CounterForNominators (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: VoterList ListNodes (r:1 w:1) + /// Proof: VoterList ListNodes (max_values: None, max_size: Some(154), added: 2629, mode: MaxEncodedLen) + /// Storage: VoterList ListBags (r:1 w:1) + /// Proof: VoterList ListBags (max_values: None, max_size: Some(82), added: 2557, mode: MaxEncodedLen) + /// Storage: VoterList CounterForListNodes (r:1 w:1) + /// Proof: VoterList CounterForListNodes (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: Staking CurrentEra (r:1 w:0) + /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: Balances Locks (r:1 w:1) + /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: FastUnstake CounterForQueue (r:1 w:1) + /// Proof: FastUnstake CounterForQueue (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + fn register_fast_unstake() -> Weight { + // Proof Size summary in bytes: + // Measured: `1826` + // Estimated: `4764` + // Minimum execution time: 122_429_000 picoseconds. 
+ Weight::from_parts(125_427_000, 0) + .saturating_add(Weight::from_parts(0, 4764)) + .saturating_add(T::DbWeight::get().reads(15)) + .saturating_add(T::DbWeight::get().writes(9)) + } + /// Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) + /// Proof: FastUnstake ErasToCheckPerBlock (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: Staking Ledger (r:1 w:0) + /// Proof: Staking Ledger (max_values: None, max_size: Some(1091), added: 3566, mode: MaxEncodedLen) + /// Storage: FastUnstake Queue (r:1 w:1) + /// Proof: FastUnstake Queue (max_values: None, max_size: Some(56), added: 2531, mode: MaxEncodedLen) + /// Storage: FastUnstake Head (r:1 w:0) + /// Proof: FastUnstake Head (max_values: Some(1), max_size: Some(3087), added: 3582, mode: MaxEncodedLen) + /// Storage: FastUnstake CounterForQueue (r:1 w:1) + /// Proof: FastUnstake CounterForQueue (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + fn deregister() -> Weight { + // Proof Size summary in bytes: + // Measured: `1118` + // Estimated: `4572` + // Minimum execution time: 43_442_000 picoseconds. + Weight::from_parts(44_728_000, 0) + .saturating_add(Weight::from_parts(0, 4572)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: FastUnstake ErasToCheckPerBlock (r:0 w:1) + /// Proof: FastUnstake ErasToCheckPerBlock (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + fn control() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_471_000 picoseconds. 
+ Weight::from_parts(2_667_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_message_queue.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_message_queue.rs new file mode 100644 index 0000000000000..9e1f6058368b6 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_message_queue.rs @@ -0,0 +1,202 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_message_queue` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `793863dddfdf`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/asset-hub-next-westend-runtime/asset_hub_next_westend_runtime.wasm +// --pallet=pallet_message_queue +// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-next-westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_message_queue`. +pub struct WeightInfo(PhantomData); +impl pallet_message_queue::WeightInfo for WeightInfo { + /// Storage: `MessageQueue::ServiceHead` (r:1 w:0) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:2 w:2) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn ready_ring_knit() -> Weight { + // Proof Size summary in bytes: + // Measured: `260` + // Estimated: `6044` + // Minimum execution time: 14_762_000 picoseconds. 
+ Weight::from_parts(15_170_000, 0) + .saturating_add(Weight::from_parts(0, 6044)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `MessageQueue::BookStateFor` (r:2 w:2) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + fn ready_ring_unknit() -> Weight { + // Proof Size summary in bytes: + // Measured: `255` + // Estimated: `6044` + // Minimum execution time: 13_040_000 picoseconds. + Weight::from_parts(13_763_000, 0) + .saturating_add(Weight::from_parts(0, 6044)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn service_queue_base() -> Weight { + // Proof Size summary in bytes: + // Measured: `42` + // Estimated: `3517` + // Minimum execution time: 4_919_000 picoseconds. + Weight::from_parts(5_213_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) + fn service_page_base_completion() -> Weight { + // Proof Size summary in bytes: + // Measured: `109` + // Estimated: `108986` + // Minimum execution time: 7_497_000 picoseconds. 
+ Weight::from_parts(7_748_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) + fn service_page_base_no_completion() -> Weight { + // Proof Size summary in bytes: + // Measured: `109` + // Estimated: `108986` + // Minimum execution time: 7_545_000 picoseconds. + Weight::from_parts(7_795_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `MessageQueue::BookStateFor` (r:0 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) + fn service_page_item() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 262_800_000 picoseconds. + Weight::from_parts(272_183_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn bump_service_head() -> Weight { + // Proof Size summary in bytes: + // Measured: `208` + // Estimated: `3517` + // Minimum execution time: 8_440_000 picoseconds. 
+ Weight::from_parts(8_894_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:0 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + fn set_service_head() -> Weight { + // Proof Size summary in bytes: + // Measured: `198` + // Estimated: `3517` + // Minimum execution time: 7_192_000 picoseconds. + Weight::from_parts(7_484_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) + fn reap_page() -> Weight { + // Proof Size summary in bytes: + // Measured: `105646` + // Estimated: `108986` + // Minimum execution time: 83_423_000 picoseconds. 
+ Weight::from_parts(84_122_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) + fn execute_overweight_page_removed() -> Weight { + // Proof Size summary in bytes: + // Measured: `105646` + // Estimated: `108986` + // Minimum execution time: 109_221_000 picoseconds. + Weight::from_parts(110_617_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) + fn execute_overweight_page_updated() -> Weight { + // Proof Size summary in bytes: + // Measured: `105646` + // Estimated: `108986` + // Minimum execution time: 172_899_000 picoseconds. 
+ Weight::from_parts(175_824_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_migrations.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_migrations.rs new file mode 100644 index 0000000000000..ace516b9746c6 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_migrations.rs @@ -0,0 +1,226 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_migrations` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-01-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `17938671047b`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/asset-hub-next-westend-runtime/asset_hub_next_westend_runtime.wasm +// --pallet=pallet_migrations +// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-next-westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_migrations`. +pub struct WeightInfo(PhantomData); +impl pallet_migrations::WeightInfo for WeightInfo { + /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + fn onboard_new_mbms() -> Weight { + // Proof Size summary in bytes: + // Measured: `171` + // Estimated: `67035` + // Minimum execution time: 8_697_000 picoseconds. + Weight::from_parts(8_998_000, 0) + .saturating_add(Weight::from_parts(0, 67035)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn progress_mbms_none() -> Weight { + // Proof Size summary in bytes: + // Measured: `42` + // Estimated: `67035` + // Minimum execution time: 2_737_000 picoseconds. 
+ Weight::from_parts(2_813_000, 0) + .saturating_add(Weight::from_parts(0, 67035)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn exec_migration_completed() -> Weight { + // Proof Size summary in bytes: + // Measured: `129` + // Estimated: `3594` + // Minimum execution time: 6_181_000 picoseconds. + Weight::from_parts(6_458_000, 0) + .saturating_add(Weight::from_parts(0, 3594)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + fn exec_migration_skipped_historic() -> Weight { + // Proof Size summary in bytes: + // Measured: `225` + // Estimated: `3731` + // Minimum execution time: 11_932_000 picoseconds. + Weight::from_parts(12_539_000, 0) + .saturating_add(Weight::from_parts(0, 3731)) + .saturating_add(T::DbWeight::get().reads(2)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + fn exec_migration_advance() -> Weight { + // Proof Size summary in bytes: + // Measured: `171` + // Estimated: `3731` + // Minimum execution time: 11_127_000 picoseconds. 
+ Weight::from_parts(11_584_000, 0) + .saturating_add(Weight::from_parts(0, 3731)) + .saturating_add(T::DbWeight::get().reads(2)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:1) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + fn exec_migration_complete() -> Weight { + // Proof Size summary in bytes: + // Measured: `171` + // Estimated: `3731` + // Minimum execution time: 12_930_000 picoseconds. + Weight::from_parts(13_272_000, 0) + .saturating_add(Weight::from_parts(0, 3731)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn exec_migration_fail() -> Weight { + // Proof Size summary in bytes: + // Measured: `171` + // Estimated: `3731` + // Minimum execution time: 13_709_000 picoseconds. + Weight::from_parts(14_123_000, 0) + .saturating_add(Weight::from_parts(0, 3731)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn on_init_loop() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 162_000 picoseconds. 
+ Weight::from_parts(188_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn force_set_cursor() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_737_000 picoseconds. + Weight::from_parts(2_919_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn force_set_active_cursor() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_087_000 picoseconds. + Weight::from_parts(3_320_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + fn force_onboard_mbms() -> Weight { + // Proof Size summary in bytes: + // Measured: `147` + // Estimated: `67035` + // Minimum execution time: 6_470_000 picoseconds. + Weight::from_parts(6_760_000, 0) + .saturating_add(Weight::from_parts(0, 67035)) + .saturating_add(T::DbWeight::get().reads(2)) + } + /// Storage: `MultiBlockMigrations::Historic` (r:256 w:256) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 256]`. 
+ fn clear_historic(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1022 + n * (271 ±0)` + // Estimated: `3834 + n * (2740 ±0)` + // Minimum execution time: 15_864_000 picoseconds. + Weight::from_parts(24_535_162, 0) + .saturating_add(Weight::from_parts(0, 3834)) + // Standard Error: 8_688 + .saturating_add(Weight::from_parts(1_530_542, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2740).saturating_mul(n.into())) + } + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `n` is `[0, 2048]`. + fn reset_pallet_migration(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1680 + n * (38 ±0)` + // Estimated: `758 + n * (39 ±0)` + // Minimum execution time: 2_168_000 picoseconds. + Weight::from_parts(2_226_000, 0) + .saturating_add(Weight::from_parts(0, 758)) + // Standard Error: 2_841 + .saturating_add(Weight::from_parts(935_438, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 39).saturating_mul(n.into())) + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_multisig.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_multisig.rs new file mode 100644 index 0000000000000..0e3d63647bdb1 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_multisig.rs @@ -0,0 +1,172 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_multisig` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-01-23, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `e20fc9f125eb`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-next-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --extrinsic=* +// --chain=asset-hub-next-westend-dev +// --pallet=pallet_multisig +// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-next-westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_multisig`. +pub struct WeightInfo(PhantomData); +impl pallet_multisig::WeightInfo for WeightInfo { + fn poke_deposit(_s: u32, ) -> Weight { + Default::default() + } + /// The range of component `z` is `[0, 10000]`. 
+ fn as_multi_threshold_1(z: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 16_032_000 picoseconds. + Weight::from_parts(16_636_014, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 11 + .saturating_add(Weight::from_parts(632, 0).saturating_mul(z.into())) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) + /// The range of component `s` is `[2, 100]`. + /// The range of component `z` is `[0, 10000]`. + fn as_multi_create(s: u32, z: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `295 + s * (2 ±0)` + // Estimated: `6811` + // Minimum execution time: 47_519_000 picoseconds. + Weight::from_parts(33_881_382, 0) + .saturating_add(Weight::from_parts(0, 6811)) + // Standard Error: 1_770 + .saturating_add(Weight::from_parts(159_560, 0).saturating_mul(s.into())) + // Standard Error: 17 + .saturating_add(Weight::from_parts(2_031, 0).saturating_mul(z.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) + /// The range of component `s` is `[3, 100]`. + /// The range of component `z` is `[0, 10000]`. + fn as_multi_approve(s: u32, z: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `315` + // Estimated: `6811` + // Minimum execution time: 31_369_000 picoseconds. 
+ Weight::from_parts(18_862_672, 0) + .saturating_add(Weight::from_parts(0, 6811)) + // Standard Error: 1_519 + .saturating_add(Weight::from_parts(141_546, 0).saturating_mul(s.into())) + // Standard Error: 14 + .saturating_add(Weight::from_parts(2_057, 0).saturating_mul(z.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `s` is `[2, 100]`. + /// The range of component `z` is `[0, 10000]`. + fn as_multi_complete(s: u32, z: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `418 + s * (33 ±0)` + // Estimated: `6811` + // Minimum execution time: 55_421_000 picoseconds. + Weight::from_parts(33_628_199, 0) + .saturating_add(Weight::from_parts(0, 6811)) + // Standard Error: 2_430 + .saturating_add(Weight::from_parts(247_959, 0).saturating_mul(s.into())) + // Standard Error: 23 + .saturating_add(Weight::from_parts(2_339, 0).saturating_mul(z.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) + /// The range of component `s` is `[2, 100]`. + /// The range of component `z` is `[0, 10000]`. + fn approve_as_multi_create(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `295 + s * (2 ±0)` + // Estimated: `6811` + // Minimum execution time: 30_380_000 picoseconds. 
+ Weight::from_parts(32_147_463, 0) + .saturating_add(Weight::from_parts(0, 6811)) + // Standard Error: 1_530 + .saturating_add(Weight::from_parts(156_234, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) + /// The range of component `s` is `[2, 100]`. + /// The range of component `z` is `[0, 10000]`. + fn approve_as_multi_approve(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `315` + // Estimated: `6811` + // Minimum execution time: 17_016_000 picoseconds. + Weight::from_parts(17_777_791, 0) + .saturating_add(Weight::from_parts(0, 6811)) + // Standard Error: 1_216 + .saturating_add(Weight::from_parts(137_967, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) + /// The range of component `s` is `[2, 100]`. + /// The range of component `z` is `[0, 10000]`. + fn cancel_as_multi(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `482 + s * (1 ±0)` + // Estimated: `6811` + // Minimum execution time: 31_594_000 picoseconds. 
+ Weight::from_parts(31_850_574, 0) + .saturating_add(Weight::from_parts(0, 6811)) + // Standard Error: 2_031 + .saturating_add(Weight::from_parts(159_513, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_nft_fractionalization.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_nft_fractionalization.rs new file mode 100644 index 0000000000000..39dcc9fd07e4c --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_nft_fractionalization.rs @@ -0,0 +1,116 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_nft_fractionalization` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-next-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=asset-hub-next-westend-dev +// --wasm-execution=compiled +// --pallet=pallet_nft_fractionalization +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/assets/asset-hub-next-westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_nft_fractionalization`. +pub struct WeightInfo(PhantomData); +impl pallet_nft_fractionalization::WeightInfo for WeightInfo { + /// Storage: `Nfts::Item` (r:1 w:0) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:1 w:1) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: 
`Assets::Metadata` (r:1 w:1) + /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) + /// Storage: `NftFractionalization::NftToAsset` (r:0 w:1) + /// Proof: `NftFractionalization::NftToAsset` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) + fn fractionalize() -> Weight { + // Proof Size summary in bytes: + // Measured: `462` + // Estimated: `4326` + // Minimum execution time: 174_312_000 picoseconds. + Weight::from_parts(177_275_000, 0) + .saturating_add(Weight::from_parts(0, 4326)) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(8)) + } + /// Storage: `NftFractionalization::NftToAsset` (r:1 w:1) + /// Proof: `NftFractionalization::NftToAsset` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:1 w:1) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:1 w:1) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// 
Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Account` (r:0 w:1) + /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemPriceOf` (r:0 w:1) + /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Nfts::PendingSwapOf` (r:0 w:1) + /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) + fn unify() -> Weight { + // Proof Size summary in bytes: + // Measured: `1275` + // Estimated: `4326` + // Minimum execution time: 123_635_000 picoseconds. + Weight::from_parts(126_975_000, 0) + .saturating_add(Weight::from_parts(0, 4326)) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(10)) + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_nfts.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_nfts.rs new file mode 100644 index 0000000000000..2ce4a34cfdf11 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_nfts.rs @@ -0,0 +1,776 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_nfts` +//! +//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-next-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=asset-hub-next-westend-dev +// --wasm-execution=compiled +// --pallet=pallet_nfts +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/assets/asset-hub-next-westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_nfts`. 
+pub struct WeightInfo(PhantomData); +impl pallet_nfts::WeightInfo for WeightInfo { + /// Storage: `Nfts::NextCollectionId` (r:1 w:1) + /// Proof: `Nfts::NextCollectionId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionRoleOf` (r:0 w:1) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:0 w:1) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionAccount` (r:0 w:1) + /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) + fn create() -> Weight { + // Proof Size summary in bytes: + // Measured: `179` + // Estimated: `3549` + // Minimum execution time: 37_322_000 picoseconds. 
+ Weight::from_parts(38_364_000, 0) + .saturating_add(Weight::from_parts(0, 3549)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `Nfts::NextCollectionId` (r:1 w:1) + /// Proof: `Nfts::NextCollectionId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionRoleOf` (r:0 w:1) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:0 w:1) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionAccount` (r:0 w:1) + /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) + fn force_create() -> Weight { + // Proof Size summary in bytes: + // Measured: `76` + // Estimated: `3549` + // Minimum execution time: 22_254_000 picoseconds. 
+ Weight::from_parts(22_613_000, 0) + .saturating_add(Weight::from_parts(0, 3549)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemMetadataOf` (r:1 w:0) + /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:1) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:1001 w:1000) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1000 w:1000) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionMetadataOf` (r:0 w:1) + /// Proof: `Nfts::CollectionMetadataOf` (`max_values`: None, `max_size`: Some(294), added: 2769, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:0 w:1) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionAccount` (r:0 w:1) + /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) + /// The range of component `m` is `[0, 1000]`. + /// The range of component `c` is `[0, 1000]`. + /// The range of component `a` is `[0, 1000]`. + fn destroy(_m: u32, c: u32, a: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `32204 + a * (366 ±0)` + // Estimated: `2523990 + a * (2954 ±0)` + // Minimum execution time: 1_204_644_000 picoseconds. 
+ Weight::from_parts(1_122_618_254, 0) + .saturating_add(Weight::from_parts(0, 2523990)) + // Standard Error: 9_641 + .saturating_add(Weight::from_parts(39_956, 0).saturating_mul(c.into())) + // Standard Error: 9_641 + .saturating_add(Weight::from_parts(6_866_428, 0).saturating_mul(a.into())) + .saturating_add(T::DbWeight::get().reads(1004)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(a.into()))) + .saturating_add(T::DbWeight::get().writes(1005)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(a.into()))) + .saturating_add(Weight::from_parts(0, 2954).saturating_mul(a.into())) + } + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Account` (r:0 w:1) + /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + fn mint() -> Weight { + // Proof Size summary in bytes: + // Measured: `455` + // Estimated: `4326` + // Minimum execution time: 47_903_000 picoseconds. 
+ Weight::from_parts(48_938_000, 0) + .saturating_add(Weight::from_parts(0, 4326)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Account` (r:0 w:1) + /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + fn force_mint() -> Weight { + // Proof Size summary in bytes: + // Measured: `455` + // Estimated: `4326` + // Minimum execution time: 46_662_000 picoseconds. 
+ Weight::from_parts(47_673_000, 0) + .saturating_add(Weight::from_parts(0, 4326)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Nfts::Attribute` (r:1 w:0) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemMetadataOf` (r:1 w:0) + /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Account` (r:0 w:1) + /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemPriceOf` (r:0 w:1) + /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemAttributesApprovalsOf` (r:0 w:1) + /// Proof: `Nfts::ItemAttributesApprovalsOf` (`max_values`: None, `max_size`: Some(1001), added: 3476, mode: `MaxEncodedLen`) + /// Storage: `Nfts::PendingSwapOf` (r:0 w:1) + /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) + fn burn() -> Weight { + // Proof Size summary in bytes: + // Measured: `564` + // Estimated: `4326` + // Minimum execution time: 53_042_000 picoseconds. 
+ Weight::from_parts(54_352_000, 0) + .saturating_add(Weight::from_parts(0, 4326)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(7)) + } + /// Storage: `Nfts::Collection` (r:1 w:0) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:1 w:0) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Account` (r:0 w:2) + /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemPriceOf` (r:0 w:1) + /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Nfts::PendingSwapOf` (r:0 w:1) + /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) + fn transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `593` + // Estimated: `4326` + // Minimum execution time: 40_570_000 picoseconds. 
+ Weight::from_parts(43_020_000, 0) + .saturating_add(Weight::from_parts(0, 4326)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `Nfts::Collection` (r:1 w:0) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Item` (r:5000 w:5000) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// The range of component `i` is `[0, 5000]`. + fn redeposit(i: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `763 + i * (108 ±0)` + // Estimated: `3549 + i * (3336 ±0)` + // Minimum execution time: 15_982_000 picoseconds. + Weight::from_parts(16_291_000, 0) + .saturating_add(Weight::from_parts(0, 3549)) + // Standard Error: 23_954 + .saturating_add(Weight::from_parts(17_559_013, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(i.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) + .saturating_add(Weight::from_parts(0, 3336).saturating_mul(i.into())) + } + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + fn lock_item_transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `435` + // Estimated: `3534` + // Minimum execution time: 20_084_000 picoseconds. 
+ Weight::from_parts(20_572_000, 0) + .saturating_add(Weight::from_parts(0, 3534)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + fn unlock_item_transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `435` + // Estimated: `3534` + // Minimum execution time: 20_007_000 picoseconds. + Weight::from_parts(20_221_000, 0) + .saturating_add(Weight::from_parts(0, 3534)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Nfts::Collection` (r:1 w:0) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:1) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + fn lock_collection() -> Weight { + // Proof Size summary in bytes: + // Measured: `340` + // Estimated: `3549` + // Minimum execution time: 16_815_000 picoseconds. 
+ Weight::from_parts(17_191_000, 0) + .saturating_add(Weight::from_parts(0, 3549)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Nfts::OwnershipAcceptance` (r:1 w:1) + /// Proof: `Nfts::OwnershipAcceptance` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionAccount` (r:0 w:2) + /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) + fn transfer_ownership() -> Weight { + // Proof Size summary in bytes: + // Measured: `388` + // Estimated: `3549` + // Minimum execution time: 22_234_000 picoseconds. + Weight::from_parts(22_888_000, 0) + .saturating_add(Weight::from_parts(0, 3549)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionRoleOf` (r:2 w:4) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + fn set_team() -> Weight { + // Proof Size summary in bytes: + // Measured: `369` + // Estimated: `6078` + // Minimum execution time: 38_473_000 picoseconds. 
+ Weight::from_parts(39_578_000, 0) + .saturating_add(Weight::from_parts(0, 6078)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionAccount` (r:0 w:2) + /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) + fn force_collection_owner() -> Weight { + // Proof Size summary in bytes: + // Measured: `311` + // Estimated: `3549` + // Minimum execution time: 17_377_000 picoseconds. + Weight::from_parts(17_887_000, 0) + .saturating_add(Weight::from_parts(0, 3549)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Nfts::Collection` (r:1 w:0) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:0 w:1) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + fn force_collection_config() -> Weight { + // Proof Size summary in bytes: + // Measured: `276` + // Estimated: `3549` + // Minimum execution time: 14_575_000 picoseconds. 
+ Weight::from_parts(14_890_000, 0) + .saturating_add(Weight::from_parts(0, 3549)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + fn lock_item_properties() -> Weight { + // Proof Size summary in bytes: + // Measured: `435` + // Estimated: `3534` + // Minimum execution time: 18_864_000 picoseconds. + Weight::from_parts(19_401_000, 0) + .saturating_add(Weight::from_parts(0, 3534)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:1 w:1) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + fn set_attribute() -> Weight { + // Proof Size summary in bytes: + // Measured: `539` + // Estimated: `3944` + // Minimum execution time: 48_949_000 picoseconds. 
+ Weight::from_parts(50_054_000, 0) + .saturating_add(Weight::from_parts(0, 3944)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:1 w:1) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + fn force_set_attribute() -> Weight { + // Proof Size summary in bytes: + // Measured: `344` + // Estimated: `3944` + // Minimum execution time: 25_545_000 picoseconds. + Weight::from_parts(26_189_000, 0) + .saturating_add(Weight::from_parts(0, 3944)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Nfts::Attribute` (r:1 w:1) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + fn clear_attribute() -> Weight { + // Proof Size summary in bytes: + // Measured: `983` + // Estimated: `3944` + // Minimum execution time: 45_215_000 picoseconds. 
+ Weight::from_parts(46_030_000, 0) + .saturating_add(Weight::from_parts(0, 3944)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Nfts::Item` (r:1 w:0) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemAttributesApprovalsOf` (r:1 w:1) + /// Proof: `Nfts::ItemAttributesApprovalsOf` (`max_values`: None, `max_size`: Some(1001), added: 3476, mode: `MaxEncodedLen`) + fn approve_item_attributes() -> Weight { + // Proof Size summary in bytes: + // Measured: `381` + // Estimated: `4466` + // Minimum execution time: 17_084_000 picoseconds. + Weight::from_parts(17_758_000, 0) + .saturating_add(Weight::from_parts(0, 4466)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Nfts::Item` (r:1 w:0) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemAttributesApprovalsOf` (r:1 w:1) + /// Proof: `Nfts::ItemAttributesApprovalsOf` (`max_values`: None, `max_size`: Some(1001), added: 3476, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:1001 w:1000) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 1000]`. + fn cancel_item_attributes_approval(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `760 + n * (398 ±0)` + // Estimated: `4466 + n * (2954 ±0)` + // Minimum execution time: 25_696_000 picoseconds. 
+ Weight::from_parts(26_074_000, 0) + .saturating_add(Weight::from_parts(0, 4466)) + // Standard Error: 7_263 + .saturating_add(Weight::from_parts(6_492_893, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2954).saturating_mul(n.into())) + } + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemMetadataOf` (r:1 w:1) + /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) + fn set_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `539` + // Estimated: `3812` + // Minimum execution time: 40_890_000 picoseconds. 
+ Weight::from_parts(41_530_000, 0) + .saturating_add(Weight::from_parts(0, 3812)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemMetadataOf` (r:1 w:1) + /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + fn clear_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `849` + // Estimated: `3812` + // Minimum execution time: 38_847_000 picoseconds. + Weight::from_parts(39_924_000, 0) + .saturating_add(Weight::from_parts(0, 3812)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionMetadataOf` (r:1 w:1) + /// Proof: `Nfts::CollectionMetadataOf` (`max_values`: None, `max_size`: Some(294), added: 2769, mode: `MaxEncodedLen`) + fn set_collection_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `398` + // Estimated: `3759` + // Minimum execution time: 
36_693_000 picoseconds. + Weight::from_parts(37_689_000, 0) + .saturating_add(Weight::from_parts(0, 3759)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:0) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionMetadataOf` (r:1 w:1) + /// Proof: `Nfts::CollectionMetadataOf` (`max_values`: None, `max_size`: Some(294), added: 2769, mode: `MaxEncodedLen`) + fn clear_collection_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `716` + // Estimated: `3759` + // Minimum execution time: 36_168_000 picoseconds. + Weight::from_parts(36_757_000, 0) + .saturating_add(Weight::from_parts(0, 3759)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + fn approve_transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `410` + // Estimated: `4326` + // Minimum execution time: 20_589_000 picoseconds. 
+ Weight::from_parts(21_153_000, 0) + .saturating_add(Weight::from_parts(0, 4326)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + fn cancel_approval() -> Weight { + // Proof Size summary in bytes: + // Measured: `418` + // Estimated: `4326` + // Minimum execution time: 18_133_000 picoseconds. + Weight::from_parts(18_701_000, 0) + .saturating_add(Weight::from_parts(0, 4326)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + fn clear_all_transfer_approvals() -> Weight { + // Proof Size summary in bytes: + // Measured: `418` + // Estimated: `4326` + // Minimum execution time: 16_809_000 picoseconds. + Weight::from_parts(17_391_000, 0) + .saturating_add(Weight::from_parts(0, 4326)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Nfts::OwnershipAcceptance` (r:1 w:1) + /// Proof: `Nfts::OwnershipAcceptance` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn set_accept_ownership() -> Weight { + // Proof Size summary in bytes: + // Measured: `76` + // Estimated: `3517` + // Minimum execution time: 14_878_000 picoseconds. 
+ Weight::from_parts(15_275_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:1) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:0) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + fn set_collection_max_supply() -> Weight { + // Proof Size summary in bytes: + // Measured: `340` + // Estimated: `3549` + // Minimum execution time: 18_388_000 picoseconds. + Weight::from_parts(18_950_000, 0) + .saturating_add(Weight::from_parts(0, 3549)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:1) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + fn update_mint_settings() -> Weight { + // Proof Size summary in bytes: + // Measured: `323` + // Estimated: `3538` + // Minimum execution time: 18_190_000 picoseconds. 
+ Weight::from_parts(18_552_000, 0) + .saturating_add(Weight::from_parts(0, 3538)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Nfts::Item` (r:1 w:0) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemPriceOf` (r:0 w:1) + /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + fn set_price() -> Weight { + // Proof Size summary in bytes: + // Measured: `518` + // Estimated: `4326` + // Minimum execution time: 22_986_000 picoseconds. + Weight::from_parts(23_601_000, 0) + .saturating_add(Weight::from_parts(0, 4326)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemPriceOf` (r:1 w:1) + /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:0) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:1 w:0) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) + /// Proof: 
`Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Account` (r:0 w:2) + /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `Nfts::PendingSwapOf` (r:0 w:1) + /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) + fn buy_item() -> Weight { + // Proof Size summary in bytes: + // Measured: `705` + // Estimated: `4326` + // Minimum execution time: 49_098_000 picoseconds. + Weight::from_parts(50_262_000, 0) + .saturating_add(Weight::from_parts(0, 4326)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// The range of component `n` is `[0, 10]`. + fn pay_tips(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_208_000 picoseconds. + Weight::from_parts(3_312_261, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 6_242 + .saturating_add(Weight::from_parts(3_672_096, 0).saturating_mul(n.into())) + } + /// Storage: `Nfts::Item` (r:2 w:0) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::PendingSwapOf` (r:0 w:1) + /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) + fn create_swap() -> Weight { + // Proof Size summary in bytes: + // Measured: `494` + // Estimated: `7662` + // Minimum execution time: 20_906_000 picoseconds. 
+ Weight::from_parts(21_412_000, 0) + .saturating_add(Weight::from_parts(0, 7662)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Nfts::PendingSwapOf` (r:1 w:1) + /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Item` (r:1 w:0) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + fn cancel_swap() -> Weight { + // Proof Size summary in bytes: + // Measured: `513` + // Estimated: `4326` + // Minimum execution time: 20_250_000 picoseconds. + Weight::from_parts(20_703_000, 0) + .saturating_add(Weight::from_parts(0, 4326)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Nfts::Item` (r:2 w:2) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::PendingSwapOf` (r:1 w:2) + /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:0) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:2 w:0) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:2 w:0) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Account` (r:0 w:4) + /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemPriceOf` (r:0 w:2) + /// Proof: 
`Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + fn claim_swap() -> Weight { + // Proof Size summary in bytes: + // Measured: `834` + // Estimated: `7662` + // Minimum execution time: 83_471_000 picoseconds. + Weight::from_parts(85_349_000, 0) + .saturating_add(Weight::from_parts(0, 7662)) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(10)) + } + /// Storage: `Nfts::CollectionRoleOf` (r:2 w:0) + /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Item` (r:1 w:1) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) + /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:10 w:10) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemMetadataOf` (r:1 w:1) + /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Account` (r:0 w:1) + /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 10]`. 
+ fn mint_pre_signed(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `558` + // Estimated: `6078 + n * (2954 ±0)` + // Minimum execution time: 140_728_000 picoseconds. + Weight::from_parts(148_945_062, 0) + .saturating_add(Weight::from_parts(0, 6078)) + // Standard Error: 49_446 + .saturating_add(Weight::from_parts(30_948_884, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes(6)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2954).saturating_mul(n.into())) + } + /// Storage: `Nfts::Item` (r:1 w:0) + /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) + /// Storage: `Nfts::ItemAttributesApprovalsOf` (r:1 w:1) + /// Proof: `Nfts::ItemAttributesApprovalsOf` (`max_values`: None, `max_size`: Some(1001), added: 3476, mode: `MaxEncodedLen`) + /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) + /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Collection` (r:1 w:1) + /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Nfts::Attribute` (r:10 w:10) + /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 10]`. + fn set_attributes_pre_signed(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `588` + // Estimated: `4466 + n * (2954 ±0)` + // Minimum execution time: 82_713_000 picoseconds. 
+ Weight::from_parts(95_912_559, 0) + .saturating_add(Weight::from_parts(0, 4466)) + // Standard Error: 73_934 + .saturating_add(Weight::from_parts(30_039_875, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2954).saturating_mul(n.into())) + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_nomination_pools.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_nomination_pools.rs new file mode 100644 index 0000000000000..e826096eb5bfe --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_nomination_pools.rs @@ -0,0 +1,825 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_nomination_pools` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-04-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-dcu62vjg-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_nomination_pools +// --chain=westend-dev +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_nomination_pools`. +pub struct WeightInfo(PhantomData); +impl pallet_nomination_pools::WeightInfo for WeightInfo { + /// Storage: `NominationPools::MinJoinBond` (r:1 w:0) + /// Proof: `NominationPools::MinJoinBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::PoolMembers` (r:1 w:1) + /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(717), added: 3192, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::BondedPools` (r:1 w:1) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:2 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::RewardPools` (r:1 w:1) + /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) + /// Proof: `NominationPools::GlobalMaxCommission` (`max_values`: 
Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::MaxPoolMembersPerPool` (r:1 w:0) + /// Proof: `NominationPools::MaxPoolMembersPerPool` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::MaxPoolMembers` (r:1 w:0) + /// Proof: `NominationPools::MaxPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::CounterForPoolMembers` (r:1 w:1) + /// Proof: `NominationPools::CounterForPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Delegators` (r:1 w:1) + /// Proof: `DelegatedStaking::Delegators` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Agents` (r:2 w:1) + /// Proof: `DelegatedStaking::Agents` (`max_values`: None, `max_size`: Some(120), added: 2595, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::CounterForDelegators` (r:1 w:1) + /// Proof: `DelegatedStaking::CounterForDelegators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:3 w:3) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:2 w:2) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: 
`MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + fn join() -> Weight { + // Proof Size summary in bytes: + // Measured: `3606` + // Estimated: `8877` + // Minimum execution time: 204_877_000 picoseconds. + Weight::from_parts(210_389_000, 0) + .saturating_add(Weight::from_parts(0, 8877)) + .saturating_add(T::DbWeight::get().reads(24)) + .saturating_add(T::DbWeight::get().writes(15)) + } + /// Storage: `NominationPools::PoolMembers` (r:1 w:1) + /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(717), added: 3192, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::BondedPools` (r:1 w:1) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::RewardPools` (r:1 w:1) + /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) + /// Proof: `NominationPools::GlobalMaxCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:2 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Delegators` (r:1 w:1) + /// Proof: `DelegatedStaking::Delegators` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Agents` (r:2 w:1) + /// Proof: 
`DelegatedStaking::Agents` (`max_values`: None, `max_size`: Some(120), added: 2595, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:3 w:3) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:2 w:2) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + fn bond_extra_transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `3762` + // Estimated: `8877` + // Minimum execution time: 203_362_000 picoseconds. 
+ Weight::from_parts(209_899_000, 0) + .saturating_add(Weight::from_parts(0, 8877)) + .saturating_add(T::DbWeight::get().reads(20)) + .saturating_add(T::DbWeight::get().writes(14)) + } + /// Storage: `NominationPools::ClaimPermissions` (r:1 w:0) + /// Proof: `NominationPools::ClaimPermissions` (`max_values`: None, `max_size`: Some(41), added: 2516, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::PoolMembers` (r:1 w:1) + /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(717), added: 3192, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::BondedPools` (r:1 w:1) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::RewardPools` (r:1 w:1) + /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) + /// Proof: `NominationPools::GlobalMaxCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:2 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Delegators` (r:1 w:1) + /// Proof: `DelegatedStaking::Delegators` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Agents` (r:2 w:1) + /// Proof: `DelegatedStaking::Agents` (`max_values`: None, `max_size`: Some(120), added: 2595, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` 
(`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:2 w:2) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + fn bond_extra_other() -> Weight { + // Proof Size summary in bytes: + // Measured: `3709` + // Estimated: `6248` + // Minimum execution time: 230_686_000 picoseconds. + Weight::from_parts(237_502_000, 0) + .saturating_add(Weight::from_parts(0, 6248)) + .saturating_add(T::DbWeight::get().reads(20)) + .saturating_add(T::DbWeight::get().writes(14)) + } + /// Storage: `NominationPools::ClaimPermissions` (r:1 w:0) + /// Proof: `NominationPools::ClaimPermissions` (`max_values`: None, `max_size`: Some(41), added: 2516, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::PoolMembers` (r:1 w:1) + /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(717), added: 3192, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::BondedPools` (r:1 w:1) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::RewardPools` (r:1 w:1) + /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) + /// Proof: `NominationPools::GlobalMaxCommission` (`max_values`: Some(1), `max_size`: 
Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn claim_payout() -> Weight { + // Proof Size summary in bytes: + // Measured: `1138` + // Estimated: `4182` + // Minimum execution time: 70_821_000 picoseconds. + Weight::from_parts(72_356_000, 0) + .saturating_add(Weight::from_parts(0, 4182)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `NominationPools::PoolMembers` (r:1 w:1) + /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(717), added: 3192, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::BondedPools` (r:1 w:1) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::RewardPools` (r:1 w:1) + /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) + /// Proof: `NominationPools::GlobalMaxCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:0) + /// Proof: `Staking::Nominators` (`max_values`: 
None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:1 w:0) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:3 w:3) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:2 w:2) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::SubPoolsStorage` (r:1 w:1) + /// Proof: `NominationPools::SubPoolsStorage` (`max_values`: None, `max_size`: Some(261), added: 2736, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::CounterForSubPoolsStorage` (r:1 w:1) + /// Proof: `NominationPools::CounterForSubPoolsStorage` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn unbond() -> Weight { + // Proof Size summary in bytes: + // Measured: `3341` + // Estimated: `8877` + // Minimum execution time: 156_714_000 picoseconds. 
+ Weight::from_parts(158_305_000, 0) + .saturating_add(Weight::from_parts(0, 8877)) + .saturating_add(T::DbWeight::get().reads(18)) + .saturating_add(T::DbWeight::get().writes(11)) + } + /// Storage: `NominationPools::BondedPools` (r:1 w:0) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:0) + /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Agents` (r:1 w:1) + /// Proof: `DelegatedStaking::Agents` (`max_values`: None, `max_size`: Some(120), added: 2595, mode: `MaxEncodedLen`) + /// The range of component `s` is `[0, 100]`. + fn pool_withdraw_unbonded(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1767` + // Estimated: `4556` + // Minimum execution time: 56_836_000 picoseconds. 
+ Weight::from_parts(59_738_398, 0) + .saturating_add(Weight::from_parts(0, 4556)) + // Standard Error: 1_478 + .saturating_add(Weight::from_parts(60_085, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `NominationPools::PoolMembers` (r:1 w:1) + /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(717), added: 3192, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::BondedPools` (r:1 w:1) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::SubPoolsStorage` (r:1 w:1) + /// Proof: `NominationPools::SubPoolsStorage` (`max_values`: None, `max_size`: Some(261), added: 2736, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Agents` (r:1 w:1) + /// Proof: `DelegatedStaking::Agents` (`max_values`: None, `max_size`: Some(120), added: 2595, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:0) + /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 
511, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Delegators` (r:1 w:1) + /// Proof: `DelegatedStaking::Delegators` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::CounterForDelegators` (r:1 w:1) + /// Proof: `DelegatedStaking::CounterForDelegators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::CounterForPoolMembers` (r:1 w:1) + /// Proof: `NominationPools::CounterForPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::ClaimPermissions` (r:0 w:1) + /// Proof: `NominationPools::ClaimPermissions` (`max_values`: None, `max_size`: Some(41), added: 2516, mode: `MaxEncodedLen`) + /// The range of component `s` is `[0, 100]`. + fn withdraw_unbonded_update(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `2405` + // Estimated: `4556` + // Minimum execution time: 136_737_000 picoseconds. 
+ Weight::from_parts(141_757_658, 0) + .saturating_add(Weight::from_parts(0, 4556)) + // Standard Error: 2_609 + .saturating_add(Weight::from_parts(84_538, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(14)) + .saturating_add(T::DbWeight::get().writes(11)) + } + /// Storage: `NominationPools::PoolMembers` (r:1 w:1) + /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(717), added: 3192, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::BondedPools` (r:1 w:1) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::SubPoolsStorage` (r:1 w:1) + /// Proof: `NominationPools::SubPoolsStorage` (`max_values`: None, `max_size`: Some(261), added: 2736, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Agents` (r:1 w:1) + /// Proof: `DelegatedStaking::Agents` (`max_values`: None, `max_size`: Some(120), added: 2595, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::SlashingSpans` (r:1 w:0) + /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::VirtualStakers` (r:1 w:1) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForVirtualStakers` (r:1 w:1) + /// Proof: `Staking::CounterForVirtualStakers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: 
`Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:1) + /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Delegators` (r:1 w:1) + /// Proof: `DelegatedStaking::Delegators` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::CounterForAgents` (r:1 w:1) + /// Proof: `DelegatedStaking::CounterForAgents` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::CounterForDelegators` (r:1 w:1) + /// Proof: `DelegatedStaking::CounterForDelegators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::CounterForPoolMembers` (r:1 w:1) + /// Proof: `NominationPools::CounterForPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::CounterForReversePoolIdLookup` (r:1 w:1) + /// Proof: `NominationPools::CounterForReversePoolIdLookup` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// 
Storage: `NominationPools::RewardPools` (r:1 w:1) + /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::CounterForRewardPools` (r:1 w:1) + /// Proof: `NominationPools::CounterForRewardPools` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::CounterForSubPoolsStorage` (r:1 w:1) + /// Proof: `NominationPools::CounterForSubPoolsStorage` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:1) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:0) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::Metadata` (r:1 w:1) + /// Proof: `NominationPools::Metadata` (`max_values`: None, `max_size`: Some(270), added: 2745, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::CounterForBondedPools` (r:1 w:1) + /// Proof: `NominationPools::CounterForBondedPools` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::ClaimPermissions` (r:0 w:1) + /// Proof: `NominationPools::ClaimPermissions` (`max_values`: None, `max_size`: Some(41), added: 2516, mode: `MaxEncodedLen`) + /// The range of component `s` is `[0, 100]`. + fn withdraw_unbonded_kill(_s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `2809` + // Estimated: `6274` + // Minimum execution time: 241_043_000 picoseconds. 
+ Weight::from_parts(250_578_253, 0) + .saturating_add(Weight::from_parts(0, 6274)) + .saturating_add(T::DbWeight::get().reads(29)) + .saturating_add(T::DbWeight::get().writes(26)) + } + /// Storage: `NominationPools::LastPoolId` (r:1 w:1) + /// Proof: `NominationPools::LastPoolId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:1 w:0) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::MinCreateBond` (r:1 w:0) + /// Proof: `NominationPools::MinCreateBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::MinJoinBond` (r:1 w:0) + /// Proof: `NominationPools::MinJoinBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::MaxPools` (r:1 w:0) + /// Proof: `NominationPools::MaxPools` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::CounterForBondedPools` (r:1 w:1) + /// Proof: `NominationPools::CounterForBondedPools` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::PoolMembers` (r:1 w:1) + /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(717), added: 3192, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::MaxPoolMembersPerPool` (r:1 w:0) + /// Proof: `NominationPools::MaxPoolMembersPerPool` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::MaxPoolMembers` (r:1 w:0) + /// Proof: `NominationPools::MaxPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::CounterForPoolMembers` (r:1 w:1) + /// Proof: `NominationPools::CounterForPoolMembers` (`max_values`: Some(1), `max_size`: 
Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:2 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Agents` (r:2 w:1) + /// Proof: `DelegatedStaking::Agents` (`max_values`: None, `max_size`: Some(120), added: 2595, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Delegators` (r:2 w:1) + /// Proof: `DelegatedStaking::Delegators` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::CounterForAgents` (r:1 w:1) + /// Proof: `DelegatedStaking::CounterForAgents` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::CounterForDelegators` (r:1 w:1) + /// Proof: `DelegatedStaking::CounterForDelegators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:1) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForVirtualStakers` (r:1 w:1) + /// Proof: `Staking::CounterForVirtualStakers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:1) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:0) + 
/// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::RewardPools` (r:1 w:1) + /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::CounterForRewardPools` (r:1 w:1) + /// Proof: `NominationPools::CounterForRewardPools` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:1) + /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::CounterForReversePoolIdLookup` (r:1 w:1) + /// Proof: `NominationPools::CounterForReversePoolIdLookup` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::BondedPools` (r:1 w:1) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:0 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + fn create() -> Weight { + // Proof Size summary in bytes: + // Measured: `1168` + // Estimated: `6196` + // Minimum execution time: 180_902_000 picoseconds. 
+ Weight::from_parts(187_769_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(31)) + .saturating_add(T::DbWeight::get().writes(23)) + } + /// Storage: `NominationPools::BondedPools` (r:1 w:0) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::PoolMembers` (r:1 w:0) + /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(717), added: 3192, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:1 w:0) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::MinCreateBond` (r:1 w:0) + /// Proof: `NominationPools::MinCreateBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::MinJoinBond` (r:1 w:0) + /// Proof: `NominationPools::MinJoinBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxNominatorsCount` (r:1 w:0) + /// Proof: `Staking::MaxNominatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:17 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: 
Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:1 w:1) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 16]`. + fn nominate(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1921` + // Estimated: `4556 + n * (2520 ±0)` + // Minimum execution time: 78_369_000 picoseconds. + Weight::from_parts(79_277_958, 0) + .saturating_add(Weight::from_parts(0, 4556)) + // Standard Error: 8_343 + .saturating_add(Weight::from_parts(1_493_255, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(15)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes(5)) + .saturating_add(Weight::from_parts(0, 2520).saturating_mul(n.into())) + } + /// Storage: `NominationPools::BondedPools` (r:1 w:1) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + fn set_state() -> Weight { + // Proof Size summary in bytes: + // Measured: `1406` + 
// Estimated: `4556` + // Minimum execution time: 32_631_000 picoseconds. + Weight::from_parts(33_356_000, 0) + .saturating_add(Weight::from_parts(0, 4556)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `NominationPools::BondedPools` (r:1 w:0) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::Metadata` (r:1 w:1) + /// Proof: `NominationPools::Metadata` (`max_values`: None, `max_size`: Some(270), added: 2745, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::CounterForMetadata` (r:1 w:1) + /// Proof: `NominationPools::CounterForMetadata` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 256]`. + fn set_metadata(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `498` + // Estimated: `3735` + // Minimum execution time: 12_514_000 picoseconds. 
+ Weight::from_parts(13_232_732, 0) + .saturating_add(Weight::from_parts(0, 3735)) + // Standard Error: 150 + .saturating_add(Weight::from_parts(2_371, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `NominationPools::MinJoinBond` (r:0 w:1) + /// Proof: `NominationPools::MinJoinBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::MaxPoolMembers` (r:0 w:1) + /// Proof: `NominationPools::MaxPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::MaxPoolMembersPerPool` (r:0 w:1) + /// Proof: `NominationPools::MaxPoolMembersPerPool` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::MinCreateBond` (r:0 w:1) + /// Proof: `NominationPools::MinCreateBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::GlobalMaxCommission` (r:0 w:1) + /// Proof: `NominationPools::GlobalMaxCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::MaxPools` (r:0 w:1) + /// Proof: `NominationPools::MaxPools` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn set_configs() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_107_000 picoseconds. 
+ Weight::from_parts(3_255_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(6)) + } + /// Storage: `NominationPools::BondedPools` (r:1 w:1) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + fn update_roles() -> Weight { + // Proof Size summary in bytes: + // Measured: `498` + // Estimated: `3719` + // Minimum execution time: 16_568_000 picoseconds. + Weight::from_parts(17_019_000, 0) + .saturating_add(Weight::from_parts(0, 3719)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `NominationPools::BondedPools` (r:1 w:0) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::PoolMembers` (r:1 w:0) + /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(717), added: 3192, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:1 w:0) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: 
`MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:1 w:1) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn chill() -> Weight { + // Proof Size summary in bytes: + // Measured: `2138` + // Estimated: `4556` + // Minimum execution time: 73_717_000 picoseconds. + Weight::from_parts(77_030_000, 0) + .saturating_add(Weight::from_parts(0, 4556)) + .saturating_add(T::DbWeight::get().reads(11)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `NominationPools::BondedPools` (r:1 w:1) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::RewardPools` (r:1 w:1) + /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) + /// Proof: `NominationPools::GlobalMaxCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn set_commission() -> Weight { + // Proof Size summary in bytes: + // Measured: `770` + // Estimated: `3719` + // Minimum execution time: 30_770_000 picoseconds. 
+ Weight::from_parts(31_556_000, 0) + .saturating_add(Weight::from_parts(0, 3719)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `NominationPools::BondedPools` (r:1 w:1) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) + /// Proof: `NominationPools::GlobalMaxCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn set_commission_max() -> Weight { + // Proof Size summary in bytes: + // Measured: `538` + // Estimated: `3719` + // Minimum execution time: 16_257_000 picoseconds. + Weight::from_parts(16_891_000, 0) + .saturating_add(Weight::from_parts(0, 3719)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `NominationPools::BondedPools` (r:1 w:1) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + fn set_commission_change_rate() -> Weight { + // Proof Size summary in bytes: + // Measured: `498` + // Estimated: `3719` + // Minimum execution time: 16_548_000 picoseconds. + Weight::from_parts(18_252_000, 0) + .saturating_add(Weight::from_parts(0, 3719)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `NominationPools::BondedPools` (r:1 w:1) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + fn set_commission_claim_permission() -> Weight { + // Proof Size summary in bytes: + // Measured: `498` + // Estimated: `3719` + // Minimum execution time: 16_085_000 picoseconds. 
+ Weight::from_parts(17_218_000, 0) + .saturating_add(Weight::from_parts(0, 3719)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `NominationPools::PoolMembers` (r:1 w:0) + /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(717), added: 3192, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::ClaimPermissions` (r:1 w:1) + /// Proof: `NominationPools::ClaimPermissions` (`max_values`: None, `max_size`: Some(41), added: 2516, mode: `MaxEncodedLen`) + fn set_claim_permission() -> Weight { + // Proof Size summary in bytes: + // Measured: `508` + // Estimated: `4182` + // Minimum execution time: 13_648_000 picoseconds. + Weight::from_parts(13_990_000, 0) + .saturating_add(Weight::from_parts(0, 4182)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `NominationPools::BondedPools` (r:1 w:0) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::RewardPools` (r:1 w:1) + /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) + /// Proof: `NominationPools::GlobalMaxCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn claim_commission() -> Weight { + // Proof Size summary in bytes: + // Measured: `968` + // Estimated: `3719` + // Minimum execution time: 60_321_000 picoseconds. 
+ Weight::from_parts(61_512_000, 0) + .saturating_add(Weight::from_parts(0, 3719)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `NominationPools::BondedPools` (r:1 w:0) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:1) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:0) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + fn adjust_pool_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `876` + // Estimated: `4764` + // Minimum execution time: 65_609_000 picoseconds. + Weight::from_parts(67_320_000, 0) + .saturating_add(Weight::from_parts(0, 4764)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `NominationPools::PoolMembers` (r:1 w:0) + /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(717), added: 3192, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Agents` (r:1 w:1) + /// Proof: `DelegatedStaking::Agents` (`max_values`: None, `max_size`: Some(120), added: 2595, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Delegators` (r:1 w:1) + /// Proof: `DelegatedStaking::Delegators` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::BondedPools` (r:1 w:0) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: 
Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::SubPoolsStorage` (r:1 w:0) + /// Proof: `NominationPools::SubPoolsStorage` (`max_values`: None, `max_size`: Some(261), added: 2736, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn apply_slash() -> Weight { + // Proof Size summary in bytes: + // Measured: `3328` + // Estimated: `4556` + // Minimum execution time: 99_605_000 picoseconds. + Weight::from_parts(101_986_000, 0) + .saturating_add(Weight::from_parts(0, 4556)) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `NominationPools::PoolMembers` (r:1 w:0) + /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(717), added: 3192, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Agents` (r:1 w:0) + /// Proof: `DelegatedStaking::Agents` (`max_values`: None, `max_size`: Some(120), added: 2595, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Delegators` (r:1 w:0) + /// Proof: `DelegatedStaking::Delegators` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::BondedPools` (r:1 w:0) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: 
None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::SubPoolsStorage` (r:1 w:0) + /// Proof: `NominationPools::SubPoolsStorage` (`max_values`: None, `max_size`: Some(261), added: 2736, mode: `MaxEncodedLen`) + fn apply_slash_fail() -> Weight { + // Proof Size summary in bytes: + // Measured: `3070` + // Estimated: `4556` + // Minimum execution time: 58_103_000 picoseconds. + Weight::from_parts(59_680_000, 0) + .saturating_add(Weight::from_parts(0, 4556)) + .saturating_add(T::DbWeight::get().reads(7)) + } + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Agents` (r:1 w:1) + /// Proof: `DelegatedStaking::Agents` (`max_values`: None, `max_size`: Some(120), added: 2595, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Delegators` (r:2 w:1) + /// Proof: `DelegatedStaking::Delegators` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::CounterForAgents` (r:1 w:1) + /// Proof: `DelegatedStaking::CounterForAgents` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: 
Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:1) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForVirtualStakers` (r:1 w:1) + /// Proof: `Staking::CounterForVirtualStakers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::CounterForDelegators` (r:1 w:1) + /// Proof: `DelegatedStaking::CounterForDelegators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + fn pool_migrate() -> Weight { + // Proof Size summary in bytes: + // Measured: `1359` + // Estimated: `6196` + // Minimum execution time: 144_098_000 picoseconds. 
+ Weight::from_parts(146_590_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(16)) + .saturating_add(T::DbWeight::get().writes(11)) + } + /// Storage: `NominationPools::PoolMembers` (r:1 w:0) + /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(717), added: 3192, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::BondedPools` (r:1 w:0) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:2 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::SubPoolsStorage` (r:1 w:0) + /// Proof: `NominationPools::SubPoolsStorage` (`max_values`: None, `max_size`: Some(261), added: 2736, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::MinJoinBond` (r:1 w:0) + /// Proof: `NominationPools::MinJoinBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Delegators` (r:2 w:2) + /// Proof: `DelegatedStaking::Delegators` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Agents` (r:2 w:0) + /// Proof: `DelegatedStaking::Agents` (`max_values`: None, `max_size`: Some(120), added: 2595, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:2 w:2) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::CounterForDelegators` (r:1 w:1) + /// Proof: `DelegatedStaking::CounterForDelegators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` 
(`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn migrate_delegation() -> Weight { + // Proof Size summary in bytes: + // Measured: `2275` + // Estimated: `6180` + // Minimum execution time: 148_594_000 picoseconds. + Weight::from_parts(152_119_000, 0) + .saturating_add(Weight::from_parts(0, 6180)) + .saturating_add(T::DbWeight::get().reads(15)) + .saturating_add(T::DbWeight::get().writes(6)) + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_preimage.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_preimage.rs new file mode 100644 index 0000000000000..a61764ce76294 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_preimage.rs @@ -0,0 +1,234 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_preimage` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-06-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner--ss9ysm1-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot +// benchmark +// pallet +// --chain=westend-dev +// --steps=50 +// --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_preimage +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --header=./file_header.txt +// --output=./runtime/westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_preimage`. +pub struct WeightInfo(PhantomData); +impl pallet_preimage::WeightInfo for WeightInfo { + fn ensure_updated(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `193 + n * (91 ±0)` + // Estimated: `3593 + n * (2566 ±0)` + // Minimum execution time: 2_000_000 picoseconds. + Weight::from_parts(2_000_000, 3593) + // Standard Error: 13_720 + .saturating_add(Weight::from_parts(17_309_199, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes(1_u64)) + .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2566).saturating_mul(n.into())) + } + + /// Storage: Preimage StatusFor (r:1 w:1) + /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: Preimage PreimageFor (r:0 w:1) + /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: MaxEncodedLen) + /// The range of component `s` is `[0, 4194304]`. 
+ fn note_preimage(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `181` + // Estimated: `3556` + // Minimum execution time: 30_248_000 picoseconds. + Weight::from_parts(30_746_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + // Standard Error: 11 + .saturating_add(Weight::from_parts(3_563, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Preimage StatusFor (r:1 w:1) + /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: Preimage PreimageFor (r:0 w:1) + /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: MaxEncodedLen) + /// The range of component `s` is `[0, 4194304]`. + fn note_requested_preimage(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `144` + // Estimated: `3556` + // Minimum execution time: 16_748_000 picoseconds. + Weight::from_parts(17_025_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + // Standard Error: 12 + .saturating_add(Weight::from_parts(3_559, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Preimage StatusFor (r:1 w:1) + /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: Preimage PreimageFor (r:0 w:1) + /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: MaxEncodedLen) + /// The range of component `s` is `[0, 4194304]`. + fn note_no_deposit_preimage(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `144` + // Estimated: `3556` + // Minimum execution time: 16_353_000 picoseconds. 
+ Weight::from_parts(16_501_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + // Standard Error: 11 + .saturating_add(Weight::from_parts(3_615, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Preimage StatusFor (r:1 w:1) + /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: Preimage PreimageFor (r:0 w:1) + /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: MaxEncodedLen) + fn unnote_preimage() -> Weight { + // Proof Size summary in bytes: + // Measured: `327` + // Estimated: `3556` + // Minimum execution time: 52_924_000 picoseconds. + Weight::from_parts(77_162_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Preimage StatusFor (r:1 w:1) + /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: Preimage PreimageFor (r:0 w:1) + /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: MaxEncodedLen) + fn unnote_no_deposit_preimage() -> Weight { + // Proof Size summary in bytes: + // Measured: `182` + // Estimated: `3556` + // Minimum execution time: 33_660_000 picoseconds. + Weight::from_parts(53_453_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Preimage StatusFor (r:1 w:1) + /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + fn request_preimage() -> Weight { + // Proof Size summary in bytes: + // Measured: `226` + // Estimated: `3556` + // Minimum execution time: 29_363_000 picoseconds. 
+ Weight::from_parts(47_779_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: Preimage StatusFor (r:1 w:1) + /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + fn request_no_deposit_preimage() -> Weight { + // Proof Size summary in bytes: + // Measured: `182` + // Estimated: `3556` + // Minimum execution time: 21_614_000 picoseconds. + Weight::from_parts(37_598_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: Preimage StatusFor (r:1 w:1) + /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + fn request_unnoted_preimage() -> Weight { + // Proof Size summary in bytes: + // Measured: `80` + // Estimated: `3556` + // Minimum execution time: 28_867_000 picoseconds. + Weight::from_parts(41_737_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: Preimage StatusFor (r:1 w:1) + /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + fn request_requested_preimage() -> Weight { + // Proof Size summary in bytes: + // Measured: `144` + // Estimated: `3556` + // Minimum execution time: 11_595_000 picoseconds. 
+ Weight::from_parts(16_316_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: Preimage StatusFor (r:1 w:1) + /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: Preimage PreimageFor (r:0 w:1) + /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: MaxEncodedLen) + fn unrequest_preimage() -> Weight { + // Proof Size summary in bytes: + // Measured: `182` + // Estimated: `3556` + // Minimum execution time: 33_521_000 picoseconds. + Weight::from_parts(50_094_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Preimage StatusFor (r:1 w:1) + /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + fn unrequest_unnoted_preimage() -> Weight { + // Proof Size summary in bytes: + // Measured: `144` + // Estimated: `3556` + // Minimum execution time: 11_048_000 picoseconds. + Weight::from_parts(15_393_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: Preimage StatusFor (r:1 w:1) + /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + fn unrequest_multi_referenced_preimage() -> Weight { + // Proof Size summary in bytes: + // Measured: `144` + // Estimated: `3556` + // Minimum execution time: 11_983_000 picoseconds. 
+ Weight::from_parts(14_983_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_proxy.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_proxy.rs new file mode 100644 index 0000000000000..a4e73b8bc0c9a --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_proxy.rs @@ -0,0 +1,230 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_proxy` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-next-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=asset-hub-next-westend-dev +// --wasm-execution=compiled +// --pallet=pallet_proxy +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/assets/asset-hub-next-westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_proxy`. +pub struct WeightInfo(PhantomData); +impl pallet_proxy::WeightInfo for WeightInfo { + fn poke_deposit() -> Weight { + Default::default() + } + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn proxy(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `127 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 15_673_000 picoseconds. 
+ Weight::from_parts(16_387_670, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 1_721 + .saturating_add(Weight::from_parts(43_526, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. + fn proxy_announced(a: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `454 + a * (68 ±0) + p * (37 ±0)` + // Estimated: `5698` + // Minimum execution time: 36_942_000 picoseconds. + Weight::from_parts(36_433_953, 0) + .saturating_add(Weight::from_parts(0, 5698)) + // Standard Error: 2_462 + .saturating_add(Weight::from_parts(143_560, 0).saturating_mul(a.into())) + // Standard Error: 2_544 + .saturating_add(Weight::from_parts(60_294, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. 
+ fn remove_announcement(a: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `369 + a * (68 ±0)` + // Estimated: `5698` + // Minimum execution time: 23_781_000 picoseconds. + Weight::from_parts(24_589_553, 0) + .saturating_add(Weight::from_parts(0, 5698)) + // Standard Error: 1_805 + .saturating_add(Weight::from_parts(121_040, 0).saturating_mul(a.into())) + // Standard Error: 1_865 + .saturating_add(Weight::from_parts(8_151, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. + fn reject_announcement(a: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `369 + a * (68 ±0)` + // Estimated: `5698` + // Minimum execution time: 23_868_000 picoseconds. 
+ Weight::from_parts(24_246_179, 0) + .saturating_add(Weight::from_parts(0, 5698)) + // Standard Error: 1_752 + .saturating_add(Weight::from_parts(124_703, 0).saturating_mul(a.into())) + // Standard Error: 1_810 + .saturating_add(Weight::from_parts(21_348, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. + fn announce(a: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `386 + a * (68 ±0) + p * (37 ±0)` + // Estimated: `5698` + // Minimum execution time: 33_352_000 picoseconds. + Weight::from_parts(33_156_164, 0) + .saturating_add(Weight::from_parts(0, 5698)) + // Standard Error: 1_284 + .saturating_add(Weight::from_parts(127_696, 0).saturating_mul(a.into())) + // Standard Error: 1_327 + .saturating_add(Weight::from_parts(44_544, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn add_proxy(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `127 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 24_620_000 picoseconds. 
+ Weight::from_parts(25_499_887, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 1_155 + .saturating_add(Weight::from_parts(43_095, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn remove_proxy(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `127 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 24_614_000 picoseconds. + Weight::from_parts(25_685_644, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 1_932 + .saturating_add(Weight::from_parts(39_563, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn remove_proxies(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `127 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 22_287_000 picoseconds. + Weight::from_parts(22_951_970, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 1_008 + .saturating_add(Weight::from_parts(30_530, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn create_pure(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `139` + // Estimated: `4706` + // Minimum execution time: 26_685_000 picoseconds. 
+ Weight::from_parts(27_473_088, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 1_684 + .saturating_add(Weight::from_parts(18_278, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[0, 30]`. + fn kill_pure(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `164 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 22_799_000 picoseconds. + Weight::from_parts(23_794_924, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 2_174 + .saturating_add(Weight::from_parts(29_777, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_referenda.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_referenda.rs new file mode 100644 index 0000000000000..dec3cb021af93 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_referenda.rs @@ -0,0 +1,523 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. 
If not, see . + +//! Autogenerated weights for `pallet_referenda` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-06-19, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-e8ezs4ez-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("kusama-dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot +// benchmark +// pallet +// --chain=kusama-dev +// --steps=50 +// --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_referenda +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --header=./file_header.txt +// --output=./runtime/kusama/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_referenda`. +pub struct WeightInfo(PhantomData); +impl pallet_referenda::WeightInfo for WeightInfo { + /// Storage: Referenda ReferendumCount (r:1 w:1) + /// Proof: Referenda ReferendumCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + /// Storage: Referenda ReferendumInfoFor (r:0 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + fn submit() -> Weight { + // Proof Size summary in bytes: + // Measured: `186` + // Estimated: `42428` + // Minimum execution time: 39_146_000 picoseconds. 
+ Weight::from_parts(40_383_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:2 w:2) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn place_decision_deposit_preparing() -> Weight { + // Proof Size summary in bytes: + // Measured: `439` + // Estimated: `83866` + // Minimum execution time: 51_385_000 picoseconds. + Weight::from_parts(52_701_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Referenda DecidingCount (r:1 w:0) + /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Referenda TrackQueue (r:1 w:1) + /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn place_decision_deposit_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `3196` + // Estimated: `42428` + // Minimum execution time: 70_018_000 picoseconds. 
+ Weight::from_parts(75_868_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Referenda DecidingCount (r:1 w:0) + /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Referenda TrackQueue (r:1 w:1) + /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn place_decision_deposit_not_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `3216` + // Estimated: `42428` + // Minimum execution time: 69_311_000 picoseconds. + Weight::from_parts(72_425_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Referenda DecidingCount (r:1 w:1) + /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:2 w:2) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn place_decision_deposit_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `439` + // Estimated: `83866` + // Minimum execution time: 64_385_000 
picoseconds. + Weight::from_parts(66_178_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Referenda DecidingCount (r:1 w:1) + /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:2 w:2) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn place_decision_deposit_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `439` + // Estimated: `83866` + // Minimum execution time: 62_200_000 picoseconds. + Weight::from_parts(63_782_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + fn refund_decision_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `279` + // Estimated: `4401` + // Minimum execution time: 29_677_000 picoseconds. 
+ Weight::from_parts(30_603_000, 0) + .saturating_add(Weight::from_parts(0, 4401)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + fn refund_submission_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `269` + // Estimated: `4401` + // Minimum execution time: 29_897_000 picoseconds. + Weight::from_parts(30_618_000, 0) + .saturating_add(Weight::from_parts(0, 4401)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:2 w:2) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn cancel() -> Weight { + // Proof Size summary in bytes: + // Measured: `347` + // Estimated: `83866` + // Minimum execution time: 37_697_000 picoseconds. + Weight::from_parts(38_953_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:2 w:2) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + /// Storage: Referenda MetadataOf (r:1 w:0) + /// Proof: Referenda MetadataOf (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn kill() -> Weight { + // Proof Size summary in bytes: + // Measured: `588` + // Estimated: `83866` + // Minimum execution time: 106_001_000 picoseconds. 
+ Weight::from_parts(107_102_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: Referenda TrackQueue (r:1 w:0) + /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + /// Storage: Referenda DecidingCount (r:1 w:1) + /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + fn one_fewer_deciding_queue_empty() -> Weight { + // Proof Size summary in bytes: + // Measured: `102` + // Estimated: `5477` + // Minimum execution time: 8_987_000 picoseconds. + Weight::from_parts(9_431_000, 0) + .saturating_add(Weight::from_parts(0, 5477)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: Referenda TrackQueue (r:1 w:1) + /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn one_fewer_deciding_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `3116` + // Estimated: `42428` + // Minimum execution time: 55_344_000 picoseconds. 
+ Weight::from_parts(58_026_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: Referenda TrackQueue (r:1 w:1) + /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn one_fewer_deciding_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `3116` + // Estimated: `42428` + // Minimum execution time: 57_003_000 picoseconds. + Weight::from_parts(60_347_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:0) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Referenda TrackQueue (r:1 w:1) + /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + fn nudge_referendum_requeued_insertion() -> Weight { + // Proof Size summary in bytes: + // Measured: `2939` + // Estimated: `5477` + // Minimum execution time: 23_001_000 picoseconds. 
+ Weight::from_parts(24_812_000, 0) + .saturating_add(Weight::from_parts(0, 5477)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:0) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Referenda TrackQueue (r:1 w:1) + /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + fn nudge_referendum_requeued_slide() -> Weight { + // Proof Size summary in bytes: + // Measured: `2939` + // Estimated: `5477` + // Minimum execution time: 23_299_000 picoseconds. + Weight::from_parts(24_465_000, 0) + .saturating_add(Weight::from_parts(0, 5477)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Referenda DecidingCount (r:1 w:0) + /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Referenda TrackQueue (r:1 w:1) + /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + fn nudge_referendum_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `2943` + // Estimated: `5477` + // Minimum execution time: 28_223_000 picoseconds. 
+ Weight::from_parts(29_664_000, 0) + .saturating_add(Weight::from_parts(0, 5477)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Referenda DecidingCount (r:1 w:0) + /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Referenda TrackQueue (r:1 w:1) + /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + fn nudge_referendum_not_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `2963` + // Estimated: `5477` + // Minimum execution time: 27_474_000 picoseconds. + Weight::from_parts(29_072_000, 0) + .saturating_add(Weight::from_parts(0, 5477)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_no_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `299` + // Estimated: `42428` + // Minimum execution time: 24_405_000 picoseconds. 
+ Weight::from_parts(25_184_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_preparing() -> Weight { + // Proof Size summary in bytes: + // Measured: `347` + // Estimated: `42428` + // Minimum execution time: 24_572_000 picoseconds. + Weight::from_parts(25_287_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + fn nudge_referendum_timed_out() -> Weight { + // Proof Size summary in bytes: + // Measured: `206` + // Estimated: `4401` + // Minimum execution time: 16_042_000 picoseconds. 
+ Weight::from_parts(16_610_000, 0) + .saturating_add(Weight::from_parts(0, 4401)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Referenda DecidingCount (r:1 w:1) + /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_begin_deciding_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `347` + // Estimated: `42428` + // Minimum execution time: 33_639_000 picoseconds. 
+ Weight::from_parts(34_749_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Referenda DecidingCount (r:1 w:1) + /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_begin_deciding_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `347` + // Estimated: `42428` + // Minimum execution time: 36_467_000 picoseconds. + Weight::from_parts(37_693_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_begin_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `400` + // Estimated: `42428` + // Minimum execution time: 29_857_000 picoseconds. 
+ Weight::from_parts(30_840_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_end_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `383` + // Estimated: `42428` + // Minimum execution time: 31_028_000 picoseconds. + Weight::from_parts(32_154_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_continue_not_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `400` + // Estimated: `42428` + // Minimum execution time: 28_594_000 picoseconds. 
+ Weight::from_parts(29_092_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_continue_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `404` + // Estimated: `42428` + // Minimum execution time: 27_246_000 picoseconds. + Weight::from_parts(28_003_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:2 w:2) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + /// Storage: Scheduler Lookup (r:1 w:1) + /// Proof: Scheduler Lookup (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) + fn nudge_referendum_approved() -> Weight { + // Proof Size summary in bytes: + // Measured: `404` + // Estimated: `83866` + // Minimum execution time: 43_426_000 picoseconds. 
+ Weight::from_parts(44_917_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_rejected() -> Weight { + // Proof Size summary in bytes: + // Measured: `400` + // Estimated: `42428` + // Minimum execution time: 30_285_000 picoseconds. + Weight::from_parts(31_575_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:0) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Preimage StatusFor (r:1 w:0) + /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: Referenda MetadataOf (r:0 w:1) + /// Proof: Referenda MetadataOf (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn set_some_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `350` + // Estimated: `4401` + // Minimum execution time: 19_254_000 picoseconds. 
+ Weight::from_parts(19_855_000, 0) + .saturating_add(Weight::from_parts(0, 4401)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:0) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Referenda MetadataOf (r:1 w:1) + /// Proof: Referenda MetadataOf (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn clear_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `283` + // Estimated: `4401` + // Minimum execution time: 16_957_000 picoseconds. + Weight::from_parts(17_556_000, 0) + .saturating_add(Weight::from_parts(0, 4401)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_scheduler.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_scheduler.rs new file mode 100644 index 0000000000000..e115eb8456f2f --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_scheduler.rs @@ -0,0 +1,286 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `pallet_scheduler` +//! +//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2024-01-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-grjcggob-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_scheduler +// --chain=westend-dev +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_scheduler`. +pub struct WeightInfo(PhantomData); +impl pallet_scheduler::WeightInfo for WeightInfo { + /// Storage: `Scheduler::IncompleteSince` (r:1 w:1) + /// Proof: `Scheduler::IncompleteSince` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn service_agendas_base() -> Weight { + // Proof Size summary in bytes: + // Measured: `69` + // Estimated: `1489` + // Minimum execution time: 3_220_000 picoseconds. + Weight::from_parts(3_512_000, 0) + .saturating_add(Weight::from_parts(0, 1489)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// The range of component `s` is `[0, 50]`. 
+ fn service_agenda_base(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `116 + s * (177 ±0)` + // Estimated: `42428` + // Minimum execution time: 3_565_000 picoseconds. + Weight::from_parts(6_102_216, 0) + .saturating_add(Weight::from_parts(0, 42428)) + // Standard Error: 1_413 + .saturating_add(Weight::from_parts(339_016, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn service_task_base() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_940_000 picoseconds. + Weight::from_parts(3_070_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `Preimage::PreimageFor` (r:1 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `Measured`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// The range of component `s` is `[128, 4194304]`. + fn service_task_fetched(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `217 + s * (1 ±0)` + // Estimated: `3682 + s * (1 ±0)` + // Minimum execution time: 16_602_000 picoseconds. 
+ Weight::from_parts(16_834_000, 0) + .saturating_add(Weight::from_parts(0, 3682)) + // Standard Error: 10 + .saturating_add(Weight::from_parts(1_307, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(s.into())) + } + /// Storage: `Scheduler::Lookup` (r:0 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + fn service_task_named() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 4_202_000 picoseconds. + Weight::from_parts(4_383_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn service_task_periodic() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_917_000 picoseconds. + Weight::from_parts(3_043_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn execute_dispatch_signed() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_707_000 picoseconds. + Weight::from_parts(1_802_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn execute_dispatch_unsigned() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_671_000 picoseconds. + Weight::from_parts(1_796_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// The range of component `s` is `[0, 49]`. + fn schedule(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `116 + s * (177 ±0)` + // Estimated: `42428` + // Minimum execution time: 9_313_000 picoseconds. 
+ Weight::from_parts(12_146_613, 0) + .saturating_add(Weight::from_parts(0, 42428)) + // Standard Error: 1_381 + .saturating_add(Weight::from_parts(360_418, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Lookup` (r:0 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// The range of component `s` is `[1, 50]`. + fn cancel(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `116 + s * (177 ±0)` + // Estimated: `42428` + // Minimum execution time: 13_079_000 picoseconds. + Weight::from_parts(12_921_017, 0) + .saturating_add(Weight::from_parts(0, 42428)) + // Standard Error: 1_112 + .saturating_add(Weight::from_parts(538_089, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Scheduler::Lookup` (r:1 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// The range of component `s` is `[0, 49]`. + fn schedule_named(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `293 + s * (185 ±0)` + // Estimated: `42428` + // Minimum execution time: 12_458_000 picoseconds. 
+ Weight::from_parts(16_009_539, 0) + .saturating_add(Weight::from_parts(0, 42428)) + // Standard Error: 2_260 + .saturating_add(Weight::from_parts(399_245, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Scheduler::Lookup` (r:1 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// The range of component `s` is `[1, 50]`. + fn cancel_named(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `319 + s * (185 ±0)` + // Estimated: `42428` + // Minimum execution time: 15_173_000 picoseconds. + Weight::from_parts(15_602_728, 0) + .saturating_add(Weight::from_parts(0, 42428)) + // Standard Error: 1_302 + .saturating_add(Weight::from_parts(557_878, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Scheduler::Retries` (r:1 w:2) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Lookup` (r:0 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// The range of component `s` is `[1, 50]`. + fn schedule_retry(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `197` + // Estimated: `42428` + // Minimum execution time: 13_531_000 picoseconds. 
+ Weight::from_parts(13_985_249, 0) + .saturating_add(Weight::from_parts(0, 42428)) + // Standard Error: 619 + .saturating_add(Weight::from_parts(39_068, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Scheduler::Agenda` (r:1 w:0) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + fn set_retry() -> Weight { + // Proof Size summary in bytes: + // Measured: `116 + s * (177 ±0)` + // Estimated: `42428` + // Minimum execution time: 8_050_000 picoseconds. + Weight::from_parts(8_440_627, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Scheduler::Lookup` (r:1 w:0) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:0) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + fn set_retry_named() -> Weight { + // Proof Size summary in bytes: + // Measured: `325 + s * (185 ±0)` + // Estimated: `42428` + // Minimum execution time: 10_876_000 picoseconds. 
+ Weight::from_parts(11_708_172, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Scheduler::Agenda` (r:1 w:0) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + fn cancel_retry() -> Weight { + // Proof Size summary in bytes: + // Measured: `116 + s * (177 ±0)` + // Estimated: `42428` + // Minimum execution time: 8_050_000 picoseconds. + Weight::from_parts(8_440_627, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Scheduler::Lookup` (r:1 w:0) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:0) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + fn cancel_retry_named() -> Weight { + // Proof Size summary in bytes: + // Measured: `325 + s * (185 ±0)` + // Estimated: `42428` + // Minimum execution time: 10_876_000 picoseconds. 
+ Weight::from_parts(11_708_172, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_session.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_session.rs new file mode 100644 index 0000000000000..461033650d5d9 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_session.rs @@ -0,0 +1,80 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_session` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-next-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=asset-hub-next-westend-dev +// --wasm-execution=compiled +// --pallet=pallet_session +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/assets/asset-hub-next-westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_session`. +pub struct WeightInfo(PhantomData); +impl pallet_session::WeightInfo for WeightInfo { + /// Storage: `Session::NextKeys` (r:1 w:1) + /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Session::KeyOwner` (r:1 w:1) + /// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn set_keys() -> Weight { + // Proof Size summary in bytes: + // Measured: `270` + // Estimated: `3735` + // Minimum execution time: 16_380_000 picoseconds. + Weight::from_parts(16_767_000, 0) + .saturating_add(Weight::from_parts(0, 3735)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Session::NextKeys` (r:1 w:1) + /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Session::KeyOwner` (r:0 w:1) + /// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn purge_keys() -> Weight { + // Proof Size summary in bytes: + // Measured: `242` + // Estimated: `3707` + // Minimum execution time: 12_158_000 picoseconds. 
+ Weight::from_parts(12_835_000, 0) + .saturating_add(Weight::from_parts(0, 3707)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_staking_async.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_staking_async.rs new file mode 100644 index 0000000000000..fac7f0256f0eb --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_staking_async.rs @@ -0,0 +1,851 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `pallet_staking_async` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-09-17, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-obbyq9g6-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_staking_async +// --chain=westend-dev +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_staking_async`. +pub struct WeightInfo(PhantomData); +impl pallet_staking_async::WeightInfo for WeightInfo { + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + fn bond() -> Weight { + // Proof Size summary in bytes: + // Measured: `6750` + // Estimated: `4218` + // Minimum execution time: 167_701_000 picoseconds. 
+ Weight::from_parts(169_311_000, 4218) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) + } + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:3 w:3) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::Lock` (r:1 w:0) + /// Proof: `VoterList::Lock` (`max_values`: Some(1), `max_size`: Some(0), added: 495, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:2 w:2) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + fn bond_extra() -> Weight { + // Proof Size summary in bytes: + // Measured: `8079` + // Estimated: `8877` + // Minimum execution time: 9_824_554_000 picoseconds. 
+ Weight::from_parts(12_021_250_000, 8877) + .saturating_add(T::DbWeight::get().reads(10_u64)) + .saturating_add(T::DbWeight::get().writes(7_u64)) + } + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:1 w:0) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:0) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:3 w:3) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::Lock` (r:1 w:0) + /// Proof: `VoterList::Lock` (`max_values`: Some(1), `max_size`: Some(0), added: 495, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:2 w:2) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + fn unbond() -> Weight { + // Proof Size summary in bytes: + // Measured: `8259` + // Estimated: `8877` + // Minimum execution time: 12_169_771_000 picoseconds. 
+ Weight::from_parts(13_911_804_000, 8877) + .saturating_add(T::DbWeight::get().reads(13_u64)) + .saturating_add(T::DbWeight::get().writes(6_u64)) + } + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:0) + /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Agents` (r:1 w:0) + /// Proof: `DelegatedStaking::Agents` (`max_values`: None, `max_size`: Some(120), added: 2595, mode: `MaxEncodedLen`) + /// The range of component `s` is `[0, 100]`. + fn withdraw_unbonded_update(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `7406` + // Estimated: `4218` + // Minimum execution time: 187_501_000 picoseconds. 
+ Weight::from_parts(190_541_700, 4218) + // Standard Error: 4_787 + .saturating_add(Weight::from_parts(9_065, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(7_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::SlashingSpans` (r:1 w:1) + /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::VirtualStakers` (r:1 w:1) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::Lock` (r:1 w:0) + /// Proof: `VoterList::Lock` (`max_values`: Some(1), `max_size`: Some(0), added: 495, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: 
`MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Staking::SpanSlash` (r:0 w:100) + /// Proof: `Staking::SpanSlash` (`max_values`: None, `max_size`: Some(76), added: 2551, mode: `MaxEncodedLen`) + /// The range of component `s` is `[0, 100]`. + fn withdraw_unbonded_kill(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `8216 + s * (4 ±0)` + // Estimated: `11638 + s * (5 ±0)` + // Minimum execution time: 12_770_155_000 picoseconds. + Weight::from_parts(19_677_901_566, 11638) + // Standard Error: 12_737_389 + .saturating_add(Weight::from_parts(70_782_130, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(14_u64)) + .saturating_add(T::DbWeight::get().writes(11_u64)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) + .saturating_add(Weight::from_parts(0, 5).saturating_mul(s.into())) + } + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinValidatorBond` (r:1 w:0) + /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinCommission` (r:1 w:0) + /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + 
/// Storage: `Staking::Validators` (r:1 w:1) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxValidatorsCount` (r:1 w:0) + /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `VoterList::Lock` (r:1 w:0) + /// Proof: `VoterList::Lock` (`max_values`: Some(1), `max_size`: Some(0), added: 495, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:1 w:1) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForValidators` (r:1 w:1) + /// Proof: `Staking::CounterForValidators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn validate() -> Weight { + // Proof Size summary in bytes: + // Measured: `9938` + // Estimated: `4218` + // Minimum execution time: 179_051_000 picoseconds. 
+ Weight::from_parts(181_951_000, 4218) + .saturating_add(T::DbWeight::get().reads(12_u64)) + .saturating_add(T::DbWeight::get().writes(5_u64)) + } + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:128 w:128) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// The range of component `k` is `[1, 128]`. + fn kick(k: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `11184 + k * (1132 ±0)` + // Estimated: `4218 + k * (3033 ±0)` + // Minimum execution time: 125_011_000 picoseconds. + Weight::from_parts(139_775_678, 4218) + // Standard Error: 124_229 + .saturating_add(Weight::from_parts(20_473_526, 0).saturating_mul(k.into())) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(k.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) + .saturating_add(Weight::from_parts(0, 3033).saturating_mul(k.into())) + } + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:1 w:0) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxNominatorsCount` (r:1 w:0) + 
/// Proof: `Staking::MaxNominatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:17 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::Lock` (r:1 w:0) + /// Proof: `VoterList::Lock` (`max_values`: Some(1), `max_size`: Some(0), added: 495, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 16]`. + fn nominate(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `4898 + n * (71 ±0)` + // Estimated: `6248 + n * (2520 ±0)` + // Minimum execution time: 7_817_348_000 picoseconds. 
+ Weight::from_parts(12_966_679_697, 6248) + // Standard Error: 70_710_808 + .saturating_add(Weight::from_parts(108_404_036, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(13_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes(6_u64)) + .saturating_add(Weight::from_parts(0, 2520).saturating_mul(n.into())) + } + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::Lock` (r:1 w:0) + /// Proof: `VoterList::Lock` (`max_values`: Some(1), `max_size`: Some(0), added: 495, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn chill() -> Weight { + // Proof Size summary in bytes: + // Measured: `5073` + // Estimated: `6248` + // 
Minimum execution time: 4_771_225_000 picoseconds. + Weight::from_parts(11_721_807_000, 6248) + .saturating_add(T::DbWeight::get().reads(10_u64)) + .saturating_add(T::DbWeight::get().writes(6_u64)) + } + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + fn set_payee() -> Weight { + // Proof Size summary in bytes: + // Measured: `4060` + // Estimated: `4218` + // Minimum execution time: 68_151_000 picoseconds. + Weight::from_parts(69_181_000, 4218) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:1 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + fn update_payee() -> Weight { + // Proof Size summary in bytes: + // Measured: `5732` + // Estimated: `4218` + // Minimum execution time: 84_081_000 picoseconds. 
+ Weight::from_parts(86_531_000, 4218) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:2 w:2) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + fn set_controller() -> Weight { + // Proof Size summary in bytes: + // Measured: `5403` + // Estimated: `7446` + // Minimum execution time: 84_140_000 picoseconds. + Weight::from_parts(86_250_000, 7446) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) + } + /// Storage: `Staking::ValidatorCount` (r:0 w:1) + /// Proof: `Staking::ValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn set_validator_count() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_270_000 picoseconds. + Weight::from_parts(7_670_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Staking::ForceEra` (r:0 w:1) + /// Proof: `Staking::ForceEra` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + fn force_no_eras() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 27_090_000 picoseconds. + Weight::from_parts(28_140_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Staking::ForceEra` (r:0 w:1) + /// Proof: `Staking::ForceEra` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + fn force_new_era() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 27_520_000 picoseconds. 
+ Weight::from_parts(28_100_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Staking::ForceEra` (r:0 w:1) + /// Proof: `Staking::ForceEra` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + fn force_new_era_always() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 27_651_000 picoseconds. + Weight::from_parts(27_950_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Staking::Invulnerables` (r:0 w:1) + /// Proof: `Staking::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// The range of component `v` is `[0, 20]`. + fn set_invulnerables(v: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_460_000 picoseconds. + Weight::from_parts(8_198_000, 0) + // Standard Error: 6_827 + .saturating_add(Weight::from_parts(23_133, 0).saturating_mul(v.into())) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Staking::Ledger` (r:1502 w:1502) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:751 w:751) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:751 w:0) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// The range of component `u` is `[0, 751]`. + fn deprecate_controller_batch(u: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `99179 + u * (1300 ±0)` + // Estimated: `990 + u * (6456 ±0)` + // Minimum execution time: 13_550_000 picoseconds. 
+ Weight::from_parts(282_563_888, 990) + // Standard Error: 138_635 + .saturating_add(Weight::from_parts(64_878_892, 0).saturating_mul(u.into())) + .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(u.into()))) + .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(u.into()))) + .saturating_add(Weight::from_parts(0, 6456).saturating_mul(u.into())) + } + /// Storage: `Staking::SlashingSpans` (r:1 w:1) + /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:1) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::Lock` (r:1 w:0) + /// Proof: `VoterList::Lock` (`max_values`: Some(1), `max_size`: Some(0), added: 495, mode: `MaxEncodedLen`) + /// Storage: 
`VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Staking::SpanSlash` (r:0 w:100) + /// Proof: `Staking::SpanSlash` (`max_values`: None, `max_size`: Some(76), added: 2551, mode: `MaxEncodedLen`) + /// The range of component `s` is `[0, 100]`. + fn force_unstake(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `8216 + s * (4 ±0)` + // Estimated: `11638 + s * (5 ±0)` + // Minimum execution time: 13_563_169_000 picoseconds. + Weight::from_parts(18_031_564_700, 11638) + // Standard Error: 15_252_700 + .saturating_add(Weight::from_parts(88_205_545, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(14_u64)) + .saturating_add(T::DbWeight::get().writes(12_u64)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) + .saturating_add(Weight::from_parts(0, 5).saturating_mul(s.into())) + } + /// Storage: `Staking::UnappliedSlashes` (r:1000 w:1000) + /// Proof: `Staking::UnappliedSlashes` (`max_values`: None, `max_size`: Some(3231), added: 5706, mode: `MaxEncodedLen`) + /// The range of component `s` is `[1, 1000]`. + fn cancel_deferred_slash(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `671 + s * (122 ±0)` + // Estimated: `990 + s * (5706 ±0)` + // Minimum execution time: 39_790_000 picoseconds. 
+ Weight::from_parts(40_220_000, 990) + // Standard Error: 45_514 + .saturating_add(Weight::from_parts(13_403_172, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(s.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) + .saturating_add(Weight::from_parts(0, 5706).saturating_mul(s.into())) + } + /// Storage: `Staking::ErasStakersOverview` (r:1 w:0) + /// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasClaimedRewards` (r:1 w:1) + /// Proof: `Staking::ErasClaimedRewards` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasValidatorReward` (r:1 w:0) + /// Proof: `Staking::ErasValidatorReward` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:65 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:65 w:65) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:65 w:65) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:65 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:65 w:65) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStakersPaged` (r:1 w:0) + /// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`) + 
/// Storage: `Staking::ErasRewardPoints` (r:1 w:0) + /// Proof: `Staking::ErasRewardPoints` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ErasValidatorPrefs` (r:1 w:0) + /// Proof: `Staking::ErasValidatorPrefs` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:65 w:0) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 64]`. + fn payout_stakers_alive_staked(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `20405 + n * (3951 ±0)` + // Estimated: `24715 + n * (3920 ±72)` + // Minimum execution time: 9_680_492_000 picoseconds. + Weight::from_parts(19_051_608_733, 24715) + // Standard Error: 48_415_392 + .saturating_add(Weight::from_parts(426_518_637, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(13_u64)) + .saturating_add(T::DbWeight::get().reads((6_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes(4_u64)) + .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 3920).saturating_mul(n.into())) + } + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:0) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:3 w:3) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), 
added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::Lock` (r:1 w:0) + /// Proof: `VoterList::Lock` (`max_values`: Some(1), `max_size`: Some(0), added: 495, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:2 w:2) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// The range of component `l` is `[1, 32]`. + fn rebond(_l: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `8081 + l * (4 ±0)` + // Estimated: `8877` + // Minimum execution time: 10_942_970_000 picoseconds. + Weight::from_parts(17_242_952_692, 8877) + .saturating_add(T::DbWeight::get().reads(10_u64)) + .saturating_add(T::DbWeight::get().writes(6_u64)) + } + /// Storage: `Staking::VirtualStakers` (r:1 w:1) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::SlashingSpans` (r:1 w:1) + /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: 
`MaxEncodedLen`) + /// Storage: `VoterList::Lock` (r:1 w:0) + /// Proof: `VoterList::Lock` (`max_values`: Some(1), `max_size`: Some(0), added: 495, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Staking::SpanSlash` (r:0 w:100) + /// Proof: `Staking::SpanSlash` (`max_values`: None, `max_size`: Some(76), added: 2551, mode: `MaxEncodedLen`) + /// The range of component `s` is `[1, 100]`. + fn reap_stash(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `8216 + s * (4 ±0)` + // Estimated: `11682 + s * (4 ±0)` + // Minimum execution time: 14_166_693_000 picoseconds. 
+ Weight::from_parts(22_160_899_341, 11682) + // Standard Error: 9_189_865 + .saturating_add(Weight::from_parts(61_155_081, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(13_u64)) + .saturating_add(T::DbWeight::get().writes(12_u64)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) + .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) + } + /// Storage: `Staking::MinCommission` (r:0 w:1) + /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinValidatorBond` (r:0 w:1) + /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxValidatorsCount` (r:0 w:1) + /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxStakedRewards` (r:0 w:1) + /// Proof: `Staking::MaxStakedRewards` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `Staking::ChillThreshold` (r:0 w:1) + /// Proof: `Staking::ChillThreshold` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxNominatorsCount` (r:0 w:1) + /// Proof: `Staking::MaxNominatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:0 w:1) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + fn set_staking_configs_all_set() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 13_480_000 picoseconds. 
+ Weight::from_parts(13_920_000, 0) + .saturating_add(T::DbWeight::get().writes(7_u64)) + } + /// Storage: `Staking::MinCommission` (r:0 w:1) + /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinValidatorBond` (r:0 w:1) + /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxValidatorsCount` (r:0 w:1) + /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxStakedRewards` (r:0 w:1) + /// Proof: `Staking::MaxStakedRewards` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `Staking::ChillThreshold` (r:0 w:1) + /// Proof: `Staking::ChillThreshold` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxNominatorsCount` (r:0 w:1) + /// Proof: `Staking::MaxNominatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:0 w:1) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + fn set_staking_configs_all_remove() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 11_710_000 picoseconds. 
+ Weight::from_parts(12_190_000, 0) + .saturating_add(T::DbWeight::get().writes(7_u64)) + } + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::ChillThreshold` (r:1 w:0) + /// Proof: `Staking::ChillThreshold` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxNominatorsCount` (r:1 w:0) + /// Proof: `Staking::MaxNominatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:1 w:0) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `VoterList::Lock` (r:1 w:0) + /// Proof: `VoterList::Lock` (`max_values`: Some(1), `max_size`: Some(0), added: 495, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: 
`VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn chill_other() -> Weight { + // Proof Size summary in bytes: + // Measured: `5165` + // Estimated: `6248` + // Minimum execution time: 9_587_859_000 picoseconds. + Weight::from_parts(11_081_581_000, 6248) + .saturating_add(T::DbWeight::get().reads(13_u64)) + .saturating_add(T::DbWeight::get().writes(6_u64)) + } + /// Storage: `Staking::MinCommission` (r:1 w:0) + /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:1) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + fn force_apply_min_commission() -> Weight { + // Proof Size summary in bytes: + // Measured: `663` + // Estimated: `3510` + // Minimum execution time: 4_676_454_000 picoseconds. + Weight::from_parts(4_721_314_000, 3510) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Staking::MinCommission` (r:0 w:1) + /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn set_min_commission() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_120_000 picoseconds. 
+ Weight::from_parts(7_490_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:0) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:0) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + fn restore_ledger() -> Weight { + // Proof Size summary in bytes: + // Measured: `7127` + // Estimated: `4764` + // Minimum execution time: 137_321_000 picoseconds. 
+ Weight::from_parts(138_981_000, 4764) + .saturating_add(T::DbWeight::get().reads(6_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + fn migrate_currency() -> Weight { + // Proof Size summary in bytes: + // Measured: `7039` + // Estimated: `4764` + // Minimum execution time: 204_921_000 picoseconds. 
+ Weight::from_parts(208_462_000, 4764) + .saturating_add(T::DbWeight::get().reads(6_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: `Staking::ActiveEra` (r:1 w:0) + /// Proof: `Staking::ActiveEra` (`max_values`: Some(1), `max_size`: Some(13), added: 508, mode: `MaxEncodedLen`) + /// Storage: `Staking::UnappliedSlashes` (r:1 w:1) + /// Proof: `Staking::UnappliedSlashes` (`max_values`: None, `max_size`: Some(3231), added: 5706, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:65 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:65 w:65) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::ReversePoolIdLookup` (r:65 w:0) + /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Agents` (r:65 w:65) + /// Proof: `DelegatedStaking::Agents` (`max_values`: None, `max_size`: Some(120), added: 2595, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:65 w:65) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:65 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:65 w:65) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + fn apply_slash() -> Weight { + // Proof Size summary in bytes: + // Measured: `216367` + // Estimated: `210810` + // Minimum execution time: 36_638_657_000 picoseconds. 
+ Weight::from_parts(39_174_206_000, 210810) + .saturating_add(T::DbWeight::get().reads(457_u64)) + .saturating_add(T::DbWeight::get().writes(261_u64)) + } + /// Storage: `Staking::ProcessingOffence` (r:1 w:1) + /// Proof: `Staking::ProcessingOffence` (`max_values`: Some(1), `max_size`: Some(85), added: 580, mode: `MaxEncodedLen`) + /// Storage: `Staking::OffenceQueueEras` (r:1 w:1) + /// Proof: `Staking::OffenceQueueEras` (`max_values`: Some(1), `max_size`: Some(9), added: 504, mode: `MaxEncodedLen`) + /// Storage: `Staking::OffenceQueue` (r:2 w:1) + /// Proof: `Staking::OffenceQueue` (`max_values`: None, `max_size`: Some(101), added: 2576, mode: `MaxEncodedLen`) + /// Storage: `Staking::SlashRewardFraction` (r:1 w:0) + /// Proof: `Staking::SlashRewardFraction` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStakersOverview` (r:1 w:0) + /// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStakersPaged` (r:1 w:0) + /// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::SlashingSpans` (r:65 w:65) + /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::NominatorSlashInEra` (r:64 w:64) + /// Proof: `Staking::NominatorSlashInEra` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) + /// Storage: `Staking::SpanSlash` (r:64 w:64) + /// Proof: `Staking::SpanSlash` (`max_values`: None, `max_size`: Some(76), added: 2551, mode: `MaxEncodedLen`) + /// Storage: `Staking::UnappliedSlashes` (r:0 w:1) + /// Proof: `Staking::UnappliedSlashes` (`max_values`: None, `max_size`: Some(3231), added: 5706, mode: `MaxEncodedLen`) + fn process_offence_queue() -> Weight { + // Proof Size summary in bytes: + // Measured: `5078` + // Estimated: `166943` + // Minimum execution time: 
43_344_626_000 picoseconds. + Weight::from_parts(50_068_965_000, 166943) + .saturating_add(T::DbWeight::get().reads(200_u64)) + .saturating_add(T::DbWeight::get().writes(197_u64)) + } + /// Storage: `Staking::ActiveEra` (r:1 w:0) + /// Proof: `Staking::ActiveEra` (`max_values`: Some(1), `max_size`: Some(13), added: 508, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStartSessionIndex` (r:1 w:0) + /// Proof: `Staking::ErasStartSessionIndex` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Staking::Invulnerables` (r:1 w:0) + /// Proof: `Staking::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStakersOverview` (r:500 w:0) + /// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) + /// Storage: `Staking::ValidatorSlashInEra` (r:500 w:500) + /// Proof: `Staking::ValidatorSlashInEra` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::OffenceQueue` (r:500 w:500) + /// Proof: `Staking::OffenceQueue` (`max_values`: None, `max_size`: Some(101), added: 2576, mode: `MaxEncodedLen`) + /// Storage: `Staking::OffenceQueueEras` (r:1 w:1) + /// Proof: `Staking::OffenceQueueEras` (`max_values`: Some(1), `max_size`: Some(9), added: 504, mode: `MaxEncodedLen`) + /// The range of component `v` is `[2, 500]`. + fn rc_on_offence(v: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `517 + v * (96 ±0)` + // Estimated: `3481 + v * (2576 ±0)` + // Minimum execution time: 4_330_171_000 picoseconds. 
+ Weight::from_parts(22_551_592_842, 3481) + // Standard Error: 13_047_521 + .saturating_add(Weight::from_parts(130_976_524, 0).saturating_mul(v.into())) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(v.into()))) + .saturating_add(T::DbWeight::get().writes(1_u64)) + .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(v.into()))) + .saturating_add(Weight::from_parts(0, 2576).saturating_mul(v.into())) + } + /// Storage: `Staking::ActiveEra` (r:1 w:1) + /// Proof: `Staking::ActiveEra` (`max_values`: Some(1), `max_size`: Some(13), added: 508, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasRewardPoints` (r:1 w:1) + /// Proof: `Staking::ErasRewardPoints` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasTotalStake` (r:1 w:0) + /// Proof: `Staking::ErasTotalStake` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxStakedRewards` (r:1 w:0) + /// Proof: `Staking::MaxStakedRewards` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `Staking::BondedEras` (r:1 w:1) + /// Proof: `Staking::BondedEras` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ForceEra` (r:1 w:0) + /// Proof: `Staking::ForceEra` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStartSessionIndex` (r:1 w:1) + /// Proof: `Staking::ErasStartSessionIndex` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Staking::VoterSnapshotStatus` (r:0 w:1) + /// Proof: `Staking::VoterSnapshotStatus` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: 
`Staking::ErasValidatorReward` (r:0 w:1) + /// Proof: `Staking::ErasValidatorReward` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Staking::NextElectionPage` (r:0 w:1) + /// Proof: `Staking::NextElectionPage` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ElectableStashes` (r:0 w:1) + /// Proof: `Staking::ElectableStashes` (`max_values`: Some(1), `max_size`: Some(32002), added: 32497, mode: `MaxEncodedLen`) + fn rc_on_session_report() -> Weight { + // Proof Size summary in bytes: + // Measured: `1067` + // Estimated: `4532` + // Minimum execution time: 12_249_491_000 picoseconds. + Weight::from_parts(13_956_694_000, 4532) + .saturating_add(T::DbWeight::get().reads(8_u64)) + .saturating_add(T::DbWeight::get().writes(8_u64)) + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_sudo.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_sudo.rs new file mode 100644 index 0000000000000..b517ab558fc9b --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_sudo.rs @@ -0,0 +1,108 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `pallet_sudo` +//! +//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-11-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_sudo +// --chain=westend-dev +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_sudo`. +pub struct WeightInfo(PhantomData); +impl pallet_sudo::WeightInfo for WeightInfo { + /// Storage: `Sudo::Key` (r:1 w:1) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + fn set_key() -> Weight { + // Proof Size summary in bytes: + // Measured: `132` + // Estimated: `1517` + // Minimum execution time: 8_750_000 picoseconds. + Weight::from_parts(9_102_000, 0) + .saturating_add(Weight::from_parts(0, 1517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Sudo::Key` (r:1 w:0) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + fn sudo() -> Weight { + // Proof Size summary in bytes: + // Measured: `132` + // Estimated: `1517` + // Minimum execution time: 9_607_000 picoseconds. 
+ Weight::from_parts(10_139_000, 0) + .saturating_add(Weight::from_parts(0, 1517)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `Sudo::Key` (r:1 w:0) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + fn sudo_as() -> Weight { + // Proof Size summary in bytes: + // Measured: `132` + // Estimated: `1517` + // Minimum execution time: 9_886_000 picoseconds. + Weight::from_parts(10_175_000, 0) + .saturating_add(Weight::from_parts(0, 1517)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `Sudo::Key` (r:1 w:1) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + fn remove_key() -> Weight { + // Proof Size summary in bytes: + // Measured: `132` + // Estimated: `1517` + // Minimum execution time: 7_843_000 picoseconds. + Weight::from_parts(8_152_000, 0) + .saturating_add(Weight::from_parts(0, 1517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Sudo::Key` (r:1 w:0) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + fn check_only_sudo_account() -> Weight { + // Proof Size summary in bytes: + // Measured: `132` + // Estimated: `1517` + // Minimum execution time: 2_875_000 picoseconds. + Weight::from_parts(6_803_000, 0) + .saturating_add(Weight::from_parts(0, 1517)) + .saturating_add(T::DbWeight::get().reads(1)) + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_timestamp.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_timestamp.rs new file mode 100644 index 0000000000000..7998d88434087 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_timestamp.rs @@ -0,0 +1,74 @@ +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_timestamp` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-next-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=asset-hub-next-westend-dev +// --wasm-execution=compiled +// --pallet=pallet_timestamp +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/assets/asset-hub-next-westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_timestamp`. 
+pub struct WeightInfo(PhantomData); +impl pallet_timestamp::WeightInfo for WeightInfo { + /// Storage: `Timestamp::Now` (r:1 w:1) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Aura::CurrentSlot` (r:1 w:0) + /// Proof: `Aura::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + fn set() -> Weight { + // Proof Size summary in bytes: + // Measured: `86` + // Estimated: `1493` + // Minimum execution time: 9_347_000 picoseconds. + Weight::from_parts(9_686_000, 0) + .saturating_add(Weight::from_parts(0, 1493)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn on_finalize() -> Weight { + // Proof Size summary in bytes: + // Measured: `57` + // Estimated: `0` + // Minimum execution time: 3_375_000 picoseconds. + Weight::from_parts(3_422_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_transaction_payment.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_transaction_payment.rs new file mode 100644 index 0000000000000..ab99081b0989f --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_transaction_payment.rs @@ -0,0 +1,70 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `pallet_transaction_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-12-21, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `gleipnir`, CPU: `AMD Ryzen 9 7900X 12-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-next-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --wasm-execution=compiled +// --pallet=pallet_transaction_payment +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-next-westend/src/weights/ +// --chain=asset-hub-next-westend-dev + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_transaction_payment`. +pub struct WeightInfo(PhantomData); +impl pallet_transaction_payment::WeightInfo for WeightInfo { + /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) + /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `4` + // Estimated: `3593` + // Minimum execution time: 40_847_000 picoseconds. 
+ Weight::from_parts(49_674_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_treasury.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_treasury.rs new file mode 100644 index 0000000000000..80fd800176080 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_treasury.rs @@ -0,0 +1,178 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `pallet_treasury` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-07, STEPS: `50`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `cob`, CPU: `` +//! 
EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/debug/polkadot +// benchmark +// pallet +// --chain=rococo-dev +// --steps=50 +// --repeat=2 +// --pallet=pallet_treasury +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./runtime/rococo/src/weights/ +// --header=./file_header.txt + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_treasury`. +pub struct WeightInfo(PhantomData); +impl pallet_treasury::WeightInfo for WeightInfo { + /// Storage: Treasury ProposalCount (r:1 w:1) + /// Proof: Treasury ProposalCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: Treasury Approvals (r:1 w:1) + /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) + /// Storage: Treasury Proposals (r:0 w:1) + /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) + fn spend_local() -> Weight { + // Proof Size summary in bytes: + // Measured: `42` + // Estimated: `1887` + // Minimum execution time: 177_000_000 picoseconds. + Weight::from_parts(191_000_000, 0) + .saturating_add(Weight::from_parts(0, 1887)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: Treasury Approvals (r:1 w:1) + /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) + fn remove_approval() -> Weight { + // Proof Size summary in bytes: + // Measured: `127` + // Estimated: `1887` + // Minimum execution time: 80_000_000 picoseconds. 
+ Weight::from_parts(82_000_000, 0) + .saturating_add(Weight::from_parts(0, 1887)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: Treasury Deactivated (r:1 w:1) + /// Proof: Treasury Deactivated (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:1) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Treasury Approvals (r:1 w:1) + /// Proof: Treasury Approvals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) + /// Storage: Treasury Proposals (r:99 w:99) + /// Proof: Treasury Proposals (max_values: None, max_size: Some(108), added: 2583, mode: MaxEncodedLen) + /// Storage: System Account (r:199 w:199) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: Bounties BountyApprovals (r:1 w:1) + /// Proof: Bounties BountyApprovals (max_values: Some(1), max_size: Some(402), added: 897, mode: MaxEncodedLen) + /// The range of component `p` is `[0, 99]`. + fn on_initialize_proposals(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `331 + p * (251 ±0)` + // Estimated: `3593 + p * (5206 ±0)` + // Minimum execution time: 887_000_000 picoseconds. 
+ Weight::from_parts(828_616_021, 0) + .saturating_add(Weight::from_parts(0, 3593)) + // Standard Error: 695_351 + .saturating_add(Weight::from_parts(566_114_524, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(p.into()))) + .saturating_add(T::DbWeight::get().writes(5)) + .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(p.into()))) + .saturating_add(Weight::from_parts(0, 5206).saturating_mul(p.into())) + } + /// Storage: AssetRate ConversionRateToNative (r:1 w:0) + /// Proof: AssetRate ConversionRateToNative (max_values: None, max_size: Some(1237), added: 3712, mode: MaxEncodedLen) + /// Storage: Treasury SpendCount (r:1 w:1) + /// Proof: Treasury SpendCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: Treasury Spends (r:0 w:1) + /// Proof: Treasury Spends (max_values: None, max_size: Some(1848), added: 4323, mode: MaxEncodedLen) + fn spend() -> Weight { + // Proof Size summary in bytes: + // Measured: `114` + // Estimated: `4702` + // Minimum execution time: 208_000_000 picoseconds. 
+ Weight::from_parts(222_000_000, 0) + .saturating_add(Weight::from_parts(0, 4702)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Treasury Spends (r:1 w:1) + /// Proof: Treasury Spends (max_values: None, max_size: Some(1848), added: 4323, mode: MaxEncodedLen) + /// Storage: XcmPallet QueryCounter (r:1 w:1) + /// Proof Skipped: XcmPallet QueryCounter (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: Configuration ActiveConfig (r:1 w:0) + /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) + /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) + /// Storage: XcmPallet SupportedVersion (r:1 w:0) + /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) + /// Storage: XcmPallet VersionDiscoveryQueue (r:1 w:1) + /// Proof Skipped: XcmPallet VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: XcmPallet SafeXcmVersion (r:1 w:0) + /// Proof Skipped: XcmPallet SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: Dmp DownwardMessageQueues (r:1 w:1) + /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) + /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) + /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + /// Storage: XcmPallet Queries (r:0 w:1) + /// Proof Skipped: XcmPallet Queries (max_values: None, max_size: None, mode: Measured) + fn payout() -> Weight { + // Proof Size summary in bytes: + // Measured: `737` + // Estimated: `5313` + // Minimum execution time: 551_000_000 picoseconds. 
+ Weight::from_parts(569_000_000, 0) + .saturating_add(Weight::from_parts(0, 5313)) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(6)) + } + /// Storage: Treasury Spends (r:1 w:1) + /// Proof: Treasury Spends (max_values: None, max_size: Some(1848), added: 4323, mode: MaxEncodedLen) + /// Storage: XcmPallet Queries (r:1 w:1) + /// Proof Skipped: XcmPallet Queries (max_values: None, max_size: None, mode: Measured) + fn check_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `442` + // Estimated: `5313` + // Minimum execution time: 245_000_000 picoseconds. + Weight::from_parts(281_000_000, 0) + .saturating_add(Weight::from_parts(0, 5313)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Treasury Spends (r:1 w:1) + /// Proof: Treasury Spends (max_values: None, max_size: Some(1848), added: 4323, mode: MaxEncodedLen) + fn void_spend() -> Weight { + // Proof Size summary in bytes: + // Measured: `172` + // Estimated: `5313` + // Minimum execution time: 147_000_000 picoseconds. + Weight::from_parts(160_000_000, 0) + .saturating_add(Weight::from_parts(0, 5313)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_uniques.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_uniques.rs new file mode 100644 index 0000000000000..0ec998a61e178 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_uniques.rs @@ -0,0 +1,466 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_uniques` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-next-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=asset-hub-next-westend-dev +// --wasm-execution=compiled +// --pallet=pallet_uniques +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/assets/asset-hub-next-westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_uniques`. 
+pub struct WeightInfo(PhantomData); +impl pallet_uniques::WeightInfo for WeightInfo { + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ClassAccount` (r:0 w:1) + /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) + fn create() -> Weight { + // Proof Size summary in bytes: + // Measured: `145` + // Estimated: `3643` + // Minimum execution time: 30_321_000 picoseconds. + Weight::from_parts(31_831_000, 0) + .saturating_add(Weight::from_parts(0, 3643)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ClassAccount` (r:0 w:1) + /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) + fn force_create() -> Weight { + // Proof Size summary in bytes: + // Measured: `42` + // Estimated: `3643` + // Minimum execution time: 13_556_000 picoseconds. 
+ Weight::from_parts(13_887_000, 0) + .saturating_add(Weight::from_parts(0, 3643)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Asset` (r:1001 w:1000) + /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) + /// Storage: `Uniques::InstanceMetadataOf` (r:1000 w:1000) + /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Attribute` (r:1000 w:1000) + /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(172), added: 2647, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ClassAccount` (r:0 w:1) + /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ClassMetadataOf` (r:0 w:1) + /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(167), added: 2642, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Account` (r:0 w:1000) + /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `Uniques::CollectionMaxSupply` (r:0 w:1) + /// Proof: `Uniques::CollectionMaxSupply` (`max_values`: None, `max_size`: Some(24), added: 2499, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 1000]`. + /// The range of component `m` is `[0, 1000]`. + /// The range of component `a` is `[0, 1000]`. + fn destroy(n: u32, m: u32, a: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `257 + a * (107 ±0) + m * (56 ±0) + n * (76 ±0)` + // Estimated: `3643 + a * (2647 ±0) + m * (2662 ±0) + n * (2597 ±0)` + // Minimum execution time: 3_038_253_000 picoseconds. 
+ Weight::from_parts(3_097_477_000, 0) + .saturating_add(Weight::from_parts(0, 3643)) + // Standard Error: 36_951 + .saturating_add(Weight::from_parts(7_368_466, 0).saturating_mul(n.into())) + // Standard Error: 36_951 + .saturating_add(Weight::from_parts(481_367, 0).saturating_mul(m.into())) + // Standard Error: 36_951 + .saturating_add(Weight::from_parts(563_245, 0).saturating_mul(a.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(m.into()))) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(a.into()))) + .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(m.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(a.into()))) + .saturating_add(Weight::from_parts(0, 2647).saturating_mul(a.into())) + .saturating_add(Weight::from_parts(0, 2662).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 2597).saturating_mul(n.into())) + } + /// Storage: `Uniques::Asset` (r:1 w:1) + /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::CollectionMaxSupply` (r:1 w:0) + /// Proof: `Uniques::CollectionMaxSupply` (`max_values`: None, `max_size`: Some(24), added: 2499, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Account` (r:0 w:1) + /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + fn mint() -> Weight { + // Proof Size summary in bytes: + // Measured: `282` + // Estimated: `3643` + // Minimum execution time: 35_343_000 picoseconds. 
+ Weight::from_parts(35_755_000, 0) + .saturating_add(Weight::from_parts(0, 3643)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Asset` (r:1 w:1) + /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Account` (r:0 w:1) + /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ItemPriceOf` (r:0 w:1) + /// Proof: `Uniques::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + fn burn() -> Weight { + // Proof Size summary in bytes: + // Measured: `428` + // Estimated: `3643` + // Minimum execution time: 36_465_000 picoseconds. + Weight::from_parts(37_139_000, 0) + .saturating_add(Weight::from_parts(0, 3643)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Uniques::Class` (r:1 w:0) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Asset` (r:1 w:1) + /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Account` (r:0 w:2) + /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ItemPriceOf` (r:0 w:1) + /// Proof: `Uniques::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + fn transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `428` + // Estimated: `3643` + // Minimum execution time: 26_394_000 picoseconds. 
+ Weight::from_parts(26_920_000, 0) + .saturating_add(Weight::from_parts(0, 3643)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Asset` (r:5000 w:5000) + /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) + /// The range of component `i` is `[0, 5000]`. + fn redeposit(i: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `738 + i * (76 ±0)` + // Estimated: `3643 + i * (2597 ±0)` + // Minimum execution time: 14_445_000 picoseconds. + Weight::from_parts(14_661_000, 0) + .saturating_add(Weight::from_parts(0, 3643)) + // Standard Error: 23_835 + .saturating_add(Weight::from_parts(17_951_538, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(i.into()))) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) + .saturating_add(Weight::from_parts(0, 2597).saturating_mul(i.into())) + } + /// Storage: `Uniques::Asset` (r:1 w:1) + /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Class` (r:1 w:0) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + fn freeze() -> Weight { + // Proof Size summary in bytes: + // Measured: `428` + // Estimated: `3643` + // Minimum execution time: 18_602_000 picoseconds. 
+ Weight::from_parts(18_954_000, 0) + .saturating_add(Weight::from_parts(0, 3643)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Uniques::Asset` (r:1 w:1) + /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Class` (r:1 w:0) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + fn thaw() -> Weight { + // Proof Size summary in bytes: + // Measured: `428` + // Estimated: `3643` + // Minimum execution time: 18_328_000 picoseconds. + Weight::from_parts(18_919_000, 0) + .saturating_add(Weight::from_parts(0, 3643)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + fn freeze_collection() -> Weight { + // Proof Size summary in bytes: + // Measured: `282` + // Estimated: `3643` + // Minimum execution time: 13_574_000 picoseconds. + Weight::from_parts(13_921_000, 0) + .saturating_add(Weight::from_parts(0, 3643)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + fn thaw_collection() -> Weight { + // Proof Size summary in bytes: + // Measured: `282` + // Estimated: `3643` + // Minimum execution time: 13_469_000 picoseconds. 
+ Weight::from_parts(13_999_000, 0) + .saturating_add(Weight::from_parts(0, 3643)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Uniques::OwnershipAcceptance` (r:1 w:1) + /// Proof: `Uniques::OwnershipAcceptance` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ClassAccount` (r:0 w:2) + /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) + fn transfer_ownership() -> Weight { + // Proof Size summary in bytes: + // Measured: `356` + // Estimated: `3643` + // Minimum execution time: 21_962_000 picoseconds. + Weight::from_parts(22_330_000, 0) + .saturating_add(Weight::from_parts(0, 3643)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + fn set_team() -> Weight { + // Proof Size summary in bytes: + // Measured: `282` + // Estimated: `3643` + // Minimum execution time: 13_869_000 picoseconds. 
+ Weight::from_parts(14_486_000, 0) + .saturating_add(Weight::from_parts(0, 3643)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ClassAccount` (r:0 w:1) + /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) + fn force_item_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `282` + // Estimated: `3643` + // Minimum execution time: 16_965_000 picoseconds. + Weight::from_parts(17_320_000, 0) + .saturating_add(Weight::from_parts(0, 3643)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:0) + /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Attribute` (r:1 w:1) + /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(172), added: 2647, mode: `MaxEncodedLen`) + fn set_attribute() -> Weight { + // Proof Size summary in bytes: + // Measured: `559` + // Estimated: `3652` + // Minimum execution time: 38_300_000 picoseconds. 
+ Weight::from_parts(39_057_000, 0) + .saturating_add(Weight::from_parts(0, 3652)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:0) + /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Attribute` (r:1 w:1) + /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(172), added: 2647, mode: `MaxEncodedLen`) + fn clear_attribute() -> Weight { + // Proof Size summary in bytes: + // Measured: `756` + // Estimated: `3652` + // Minimum execution time: 37_420_000 picoseconds. + Weight::from_parts(38_087_000, 0) + .saturating_add(Weight::from_parts(0, 3652)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:1) + /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) + fn set_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `348` + // Estimated: `3652` + // Minimum execution time: 29_457_000 picoseconds. 
+ Weight::from_parts(30_163_000, 0) + .saturating_add(Weight::from_parts(0, 3652)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:1) + /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) + fn clear_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `559` + // Estimated: `3652` + // Minimum execution time: 30_471_000 picoseconds. + Weight::from_parts(30_893_000, 0) + .saturating_add(Weight::from_parts(0, 3652)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Uniques::Class` (r:1 w:1) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ClassMetadataOf` (r:1 w:1) + /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(167), added: 2642, mode: `MaxEncodedLen`) + fn set_collection_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `282` + // Estimated: `3643` + // Minimum execution time: 30_465_000 picoseconds. 
+ Weight::from_parts(31_298_000, 0) + .saturating_add(Weight::from_parts(0, 3643)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Uniques::Class` (r:1 w:0) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ClassMetadataOf` (r:1 w:1) + /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(167), added: 2642, mode: `MaxEncodedLen`) + fn clear_collection_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `473` + // Estimated: `3643` + // Minimum execution time: 29_491_000 picoseconds. + Weight::from_parts(30_096_000, 0) + .saturating_add(Weight::from_parts(0, 3643)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Uniques::Class` (r:1 w:0) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Asset` (r:1 w:1) + /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) + fn approve_transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `428` + // Estimated: `3643` + // Minimum execution time: 19_122_000 picoseconds. + Weight::from_parts(19_697_000, 0) + .saturating_add(Weight::from_parts(0, 3643)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Uniques::Class` (r:1 w:0) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Asset` (r:1 w:1) + /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) + fn cancel_approval() -> Weight { + // Proof Size summary in bytes: + // Measured: `461` + // Estimated: `3643` + // Minimum execution time: 19_016_000 picoseconds. 
+ Weight::from_parts(19_352_000, 0) + .saturating_add(Weight::from_parts(0, 3643)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Uniques::OwnershipAcceptance` (r:1 w:1) + /// Proof: `Uniques::OwnershipAcceptance` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn set_accept_ownership() -> Weight { + // Proof Size summary in bytes: + // Measured: `42` + // Estimated: `3517` + // Minimum execution time: 14_955_000 picoseconds. + Weight::from_parts(15_463_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Uniques::CollectionMaxSupply` (r:1 w:1) + /// Proof: `Uniques::CollectionMaxSupply` (`max_values`: None, `max_size`: Some(24), added: 2499, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Class` (r:1 w:0) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + fn set_collection_max_supply() -> Weight { + // Proof Size summary in bytes: + // Measured: `282` + // Estimated: `3643` + // Minimum execution time: 16_155_000 picoseconds. + Weight::from_parts(16_535_000, 0) + .saturating_add(Weight::from_parts(0, 3643)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Uniques::Asset` (r:1 w:0) + /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ItemPriceOf` (r:0 w:1) + /// Proof: `Uniques::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + fn set_price() -> Weight { + // Proof Size summary in bytes: + // Measured: `259` + // Estimated: `3587` + // Minimum execution time: 16_135_000 picoseconds. 
+ Weight::from_parts(16_686_000, 0) + .saturating_add(Weight::from_parts(0, 3587)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Uniques::Asset` (r:1 w:1) + /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) + /// Storage: `Uniques::ItemPriceOf` (r:1 w:1) + /// Proof: `Uniques::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Class` (r:1 w:0) + /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) + /// Storage: `Uniques::Account` (r:0 w:2) + /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) + fn buy_item() -> Weight { + // Proof Size summary in bytes: + // Measured: `540` + // Estimated: `3643` + // Minimum execution time: 35_899_000 picoseconds. + Weight::from_parts(37_432_000, 0) + .saturating_add(Weight::from_parts(0, 3643)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(4)) + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_utility.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_utility.rs new file mode 100644 index 0000000000000..960e210795209 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_utility.rs @@ -0,0 +1,114 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Autogenerated weights for `pallet_utility`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
+//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-next-westend-dev")`, DB CACHE: 1024

// Executed Command:
+// ./target/production/polkadot-parachain
+// benchmark
+// pallet
+// --chain=asset-hub-next-westend-dev
+// --wasm-execution=compiled
+// --pallet=pallet_utility
+// --no-storage-info
+// --no-median-slopes
+// --no-min-squares
+// --extrinsic=*
+// --steps=50
+// --repeat=20
+// --json
+// --header=./file_header.txt
+// --output=./parachains/runtimes/assets/asset-hub-next-westend/src/weights/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `pallet_utility`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> pallet_utility::WeightInfo for WeightInfo<T> {
+	/// The range of component `c` is `[0, 1000]`.
+	fn batch(c: u32, ) -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `0`
+		// Estimated: `0`
+		// Minimum execution time: 6_596_000 picoseconds.
+ Weight::from_parts(6_795_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 2_304 + .saturating_add(Weight::from_parts(6_036_412, 0).saturating_mul(c.into())) + } + fn as_derivative() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 4_877_000 picoseconds. + Weight::from_parts(5_175_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// The range of component `c` is `[0, 1000]`. + fn batch_all(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 6_533_000 picoseconds. + Weight::from_parts(6_652_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 2_270 + .saturating_add(Weight::from_parts(6_403_555, 0).saturating_mul(c.into())) + } + fn dispatch_as() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 8_628_000 picoseconds. + Weight::from_parts(9_057_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// The range of component `c` is `[0, 1000]`. + fn force_batch(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 6_619_000 picoseconds. + Weight::from_parts(380_833, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 3_765 + .saturating_add(Weight::from_parts(6_028_416, 0).saturating_mul(c.into())) + } + + fn dispatch_as_fallible() -> Weight { + Default::default() + } + + fn if_else() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 6_000_000 picoseconds. 
+ Weight::from_parts(7_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_vesting.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_vesting.rs new file mode 100644 index 0000000000000..f461ad9cbc0a1 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_vesting.rs @@ -0,0 +1,265 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `pallet_vesting` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-06-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner--ss9ysm1-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 1024
+
+// Executed Command:
+// ./target/production/polkadot
+// benchmark
+// pallet
+// --chain=westend-dev
+// --steps=50
+// --repeat=20
+// --no-storage-info
+// --no-median-slopes
+// --no-min-squares
+// --pallet=pallet_vesting
+// --extrinsic=*
+// --execution=wasm
+// --wasm-execution=compiled
+// --header=./file_header.txt
+// --output=./runtime/westend/src/weights/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `pallet_vesting`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> pallet_vesting::WeightInfo for WeightInfo<T> {
+	/// Storage: Vesting Vesting (r:1 w:1)
+	/// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen)
+	/// Storage: Balances Locks (r:1 w:1)
+	/// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen)
+	/// Storage: Balances Freezes (r:1 w:0)
+	/// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen)
+	/// The range of component `l` is `[0, 49]`.
+	/// The range of component `s` is `[1, 28]`.
+	fn vest_locked(l: u32, s: u32, ) -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `348 + l * (25 ±0) + s * (36 ±0)`
+		// Estimated: `4764`
+		// Minimum execution time: 35_225_000 picoseconds.
+ Weight::from_parts(34_420_748, 0) + .saturating_add(Weight::from_parts(0, 4764)) + // Standard Error: 2_341 + .saturating_add(Weight::from_parts(41_794, 0).saturating_mul(l.into())) + // Standard Error: 4_166 + .saturating_add(Weight::from_parts(114_507, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Vesting Vesting (r:1 w:1) + /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) + /// Storage: Balances Locks (r:1 w:1) + /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// The range of component `l` is `[0, 49]`. + /// The range of component `s` is `[1, 28]`. + fn vest_unlocked(l: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `348 + l * (25 ±0) + s * (36 ±0)` + // Estimated: `4764` + // Minimum execution time: 38_507_000 picoseconds. 
+ Weight::from_parts(38_552_717, 0) + .saturating_add(Weight::from_parts(0, 4764)) + // Standard Error: 2_406 + .saturating_add(Weight::from_parts(42_332, 0).saturating_mul(l.into())) + // Standard Error: 4_282 + .saturating_add(Weight::from_parts(67_638, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Vesting Vesting (r:1 w:1) + /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) + /// Storage: Balances Locks (r:1 w:1) + /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: System Account (r:1 w:1) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// The range of component `l` is `[0, 49]`. + /// The range of component `s` is `[1, 28]`. + fn vest_other_locked(l: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `451 + l * (25 ±0) + s * (36 ±0)` + // Estimated: `4764` + // Minimum execution time: 36_919_000 picoseconds. 
+ Weight::from_parts(35_087_984, 0) + .saturating_add(Weight::from_parts(0, 4764)) + // Standard Error: 2_435 + .saturating_add(Weight::from_parts(66_131, 0).saturating_mul(l.into())) + // Standard Error: 4_333 + .saturating_add(Weight::from_parts(125_178, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: Vesting Vesting (r:1 w:1) + /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) + /// Storage: Balances Locks (r:1 w:1) + /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: System Account (r:1 w:1) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// The range of component `l` is `[0, 49]`. + /// The range of component `s` is `[1, 28]`. + fn vest_other_unlocked(l: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `451 + l * (25 ±0) + s * (36 ±0)` + // Estimated: `4764` + // Minimum execution time: 40_393_000 picoseconds. 
+ Weight::from_parts(39_522_987, 0) + .saturating_add(Weight::from_parts(0, 4764)) + // Standard Error: 1_958 + .saturating_add(Weight::from_parts(46_626, 0).saturating_mul(l.into())) + // Standard Error: 3_484 + .saturating_add(Weight::from_parts(94_547, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: Vesting Vesting (r:1 w:1) + /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) + /// Storage: System Account (r:1 w:1) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: Balances Locks (r:1 w:1) + /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// The range of component `l` is `[0, 49]`. + /// The range of component `s` is `[0, 27]`. + fn vested_transfer(l: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `522 + l * (25 ±0) + s * (36 ±0)` + // Estimated: `4764` + // Minimum execution time: 72_925_000 picoseconds. 
+ Weight::from_parts(75_858_529, 0) + .saturating_add(Weight::from_parts(0, 4764)) + // Standard Error: 3_995 + .saturating_add(Weight::from_parts(70_032, 0).saturating_mul(l.into())) + // Standard Error: 7_108 + .saturating_add(Weight::from_parts(160_507, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: Vesting Vesting (r:1 w:1) + /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) + /// Storage: System Account (r:2 w:2) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// Storage: Balances Locks (r:1 w:1) + /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// The range of component `l` is `[0, 49]`. + /// The range of component `s` is `[0, 27]`. + fn force_vested_transfer(l: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `625 + l * (25 ±0) + s * (36 ±0)` + // Estimated: `6196` + // Minimum execution time: 74_405_000 picoseconds. 
+ Weight::from_parts(78_253_087, 0) + .saturating_add(Weight::from_parts(0, 6196)) + // Standard Error: 3_708 + .saturating_add(Weight::from_parts(56_748, 0).saturating_mul(l.into())) + // Standard Error: 6_598 + .saturating_add(Weight::from_parts(146_713, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: Vesting Vesting (r:1 w:1) + /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) + /// Storage: Balances Locks (r:1 w:1) + /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: System Account (r:1 w:1) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// The range of component `l` is `[0, 49]`. + /// The range of component `s` is `[2, 28]`. + fn not_unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `449 + l * (25 ±0) + s * (36 ±0)` + // Estimated: `4764` + // Minimum execution time: 37_715_000 picoseconds. 
+ Weight::from_parts(36_483_330, 0) + .saturating_add(Weight::from_parts(0, 4764)) + // Standard Error: 2_146 + .saturating_add(Weight::from_parts(55_976, 0).saturating_mul(l.into())) + // Standard Error: 3_964 + .saturating_add(Weight::from_parts(116_455, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: Vesting Vesting (r:1 w:1) + /// Proof: Vesting Vesting (max_values: None, max_size: Some(1057), added: 3532, mode: MaxEncodedLen) + /// Storage: Balances Locks (r:1 w:1) + /// Proof: Balances Locks (max_values: None, max_size: Some(1299), added: 3774, mode: MaxEncodedLen) + /// Storage: Balances Freezes (r:1 w:0) + /// Proof: Balances Freezes (max_values: None, max_size: Some(49), added: 2524, mode: MaxEncodedLen) + /// Storage: System Account (r:1 w:1) + /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + /// The range of component `l` is `[0, 49]`. + /// The range of component `s` is `[2, 28]`. + fn unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `449 + l * (25 ±0) + s * (36 ±0)` + // Estimated: `4764` + // Minimum execution time: 42_102_000 picoseconds. 
+ Weight::from_parts(41_671_515, 0) + .saturating_add(Weight::from_parts(0, 4764)) + // Standard Error: 2_743 + .saturating_add(Weight::from_parts(47_496, 0).saturating_mul(l.into())) + // Standard Error: 5_065 + .saturating_add(Weight::from_parts(95_785, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `l` is `[0, 49]`. + /// The range of component `s` is `[2, 28]`. + fn force_remove_vesting_schedule(l: u32, s: u32) -> Weight { + // Proof Size summary in bytes: + // Measured: `555 + l * (25 ±0) + s * (36 ±0)` + // Estimated: `4764` + // Minimum execution time: 41_497_000 picoseconds. 
+ Weight::from_parts(38_763_834, 4764) + // Standard Error: 2_030 + .saturating_add(Weight::from_parts(99_580, 0).saturating_mul(l.into())) + // Standard Error: 3_750 + .saturating_add(Weight::from_parts(132_188, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_whitelist.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_whitelist.rs new file mode 100644 index 0000000000000..c5405c3710112 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_whitelist.rs @@ -0,0 +1,116 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `pallet_whitelist` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-o7yfgx5n-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// target/production/polkadot
+// benchmark
+// pallet
+// --steps=50
+// --repeat=20
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json
+// --pallet=pallet_whitelist
+// --chain=westend-dev
+// --header=./file_header.txt
+// --output=./runtime/westend/src/weights/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `pallet_whitelist`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> pallet_whitelist::WeightInfo for WeightInfo<T> {
+	/// Storage: `Whitelist::WhitelistedCall` (r:1 w:1)
+	/// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`)
+	/// Storage: `Preimage::StatusFor` (r:1 w:1)
+	/// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`)
+	fn whitelist_call() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `122`
+		// Estimated: `3556`
+		// Minimum execution time: 21_188_000 picoseconds.
+ Weight::from_parts(21_804_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) + /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + fn remove_whitelisted_call() -> Weight { + // Proof Size summary in bytes: + // Measured: `251` + // Estimated: `3556` + // Minimum execution time: 17_655_000 picoseconds. + Weight::from_parts(19_443_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) + /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:1 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `Measured`) + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 4194294]`. + fn dispatch_whitelisted_call(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `327 + n * (1 ±0)` + // Estimated: `3791 + n * (1 ±0)` + // Minimum execution time: 30_540_000 picoseconds. 
+ Weight::from_parts(30_886_000, 0) + .saturating_add(Weight::from_parts(0, 3791)) + // Standard Error: 9 + .saturating_add(Weight::from_parts(1_779, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) + } + /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) + /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 10000]`. + fn dispatch_whitelisted_call_with_preimage(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `251` + // Estimated: `3556` + // Minimum execution time: 21_082_000 picoseconds. + Weight::from_parts(21_922_294, 0) + .saturating_add(Weight::from_parts(0, 3556)) + // Standard Error: 4 + .saturating_add(Weight::from_parts(1_412, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_xcm.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_xcm.rs new file mode 100644 index 0000000000000..98194b5f7c849 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_xcm.rs @@ -0,0 +1,417 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_xcm` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-12-18, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `c0a5c14955e4`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-next-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --extrinsic=* +// --chain=asset-hub-next-westend-dev +// --pallet=pallet_xcm +// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-next-westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_xcm`. 
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
+	/// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0)
+	/// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
+	/// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	/// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
+	/// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
+	/// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
+	/// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	/// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
+	/// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	fn send() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `145`
+		// Estimated: `3610`
+		// Minimum execution time: 28_333_000 picoseconds.
+ Weight::from_parts(29_115_000, 0) + .saturating_add(Weight::from_parts(0, 3610)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) + /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn teleport_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 111_150_000 picoseconds. 
+ Weight::from_parts(113_250_000, 0) + .saturating_add(Weight::from_parts(0, 3610)) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) + /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) + /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) + /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: Some(105506), added: 107981, mode: `MaxEncodedLen`) + fn reserve_transfer_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `400` + 
// Estimated: `6196` + // Minimum execution time: 135_730_000 picoseconds. + Weight::from_parts(140_479_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) + /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Assets::Asset` (r:1 w:1) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `AssetsFreezer::FrozenBalances` (r:1 w:0) + /// Proof: `AssetsFreezer::FrozenBalances` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: 
`ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn transfer_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `571` + // Estimated: `6208` + // Minimum execution time: 174_654_000 picoseconds. + Weight::from_parts(182_260_000, 0) + .saturating_add(Weight::from_parts(0, 6208)) + .saturating_add(T::DbWeight::get().reads(14)) + .saturating_add(T::DbWeight::get().writes(7)) + } + /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) + /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn execute() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `1588` + // Minimum execution time: 12_750_000 picoseconds. + Weight::from_parts(13_124_000, 0) + .saturating_add(Weight::from_parts(0, 1588)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn force_xcm_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_083_000 picoseconds. + Weight::from_parts(7_353_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:0 w:1) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn force_default_xcm_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_254_000 picoseconds. 
+ Weight::from_parts(2_408_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn force_subscribe_version_notify() -> Weight { + // Proof Size summary in bytes: + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 34_983_000 picoseconds. 
+ Weight::from_parts(35_949_000, 0) + .saturating_add(Weight::from_parts(0, 3610)) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn force_unsubscribe_version_notify() -> Weight { + // Proof Size summary in bytes: + // Measured: `363` + // Estimated: `3828` + // Minimum execution time: 38_226_000 picoseconds. 
+ Weight::from_parts(39_353_000, 0) + .saturating_add(Weight::from_parts(0, 3828)) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `PolkadotXcm::XcmExecutionSuspended` (r:0 w:1) + /// Proof: `PolkadotXcm::XcmExecutionSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn force_suspension() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_254_000 picoseconds. + Weight::from_parts(2_432_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PolkadotXcm::SupportedVersion` (r:6 w:2) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn migrate_supported_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `159` + // Estimated: `15999` + // Minimum execution time: 25_561_000 picoseconds. + Weight::from_parts(26_274_000, 0) + .saturating_add(Weight::from_parts(0, 15999)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::VersionNotifiers` (r:6 w:2) + /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn migrate_version_notifiers() -> Weight { + // Proof Size summary in bytes: + // Measured: `163` + // Estimated: `16003` + // Minimum execution time: 25_950_000 picoseconds. 
+ Weight::from_parts(26_532_000, 0) + .saturating_add(Weight::from_parts(0, 16003)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:7 w:0) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn already_notified_target() -> Weight { + // Proof Size summary in bytes: + // Measured: `173` + // Estimated: `18488` + // Minimum execution time: 28_508_000 picoseconds. + Weight::from_parts(29_178_000, 0) + .saturating_add(Weight::from_parts(0, 18488)) + .saturating_add(T::DbWeight::get().reads(7)) + } + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn notify_current_targets() -> Weight { + // Proof Size summary in bytes: + // Measured: `212` + // Estimated: `6152` + // Minimum execution 
time: 33_244_000 picoseconds. + Weight::from_parts(33_946_000, 0) + .saturating_add(Weight::from_parts(0, 6152)) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn notify_target_migration_fail() -> Weight { + // Proof Size summary in bytes: + // Measured: `176` + // Estimated: `13541` + // Minimum execution time: 18_071_000 picoseconds. + Weight::from_parts(18_677_000, 0) + .saturating_add(Weight::from_parts(0, 13541)) + .saturating_add(T::DbWeight::get().reads(5)) + } + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:2) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn migrate_version_notify_targets() -> Weight { + // Proof Size summary in bytes: + // Measured: `170` + // Estimated: `16010` + // Minimum execution time: 25_605_000 picoseconds. 
+ Weight::from_parts(26_284_000, 0) + .saturating_add(Weight::from_parts(0, 16010)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:6 w:2) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn migrate_and_notify_old_targets() -> Weight { + // Proof Size summary in bytes: + // Measured: `212` + // Estimated: `16052` + // Minimum execution time: 46_991_000 picoseconds. 
+ Weight::from_parts(47_866_000, 0) + .saturating_add(Weight::from_parts(0, 16052)) + .saturating_add(T::DbWeight::get().reads(12)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn new_query() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `1588` + // Minimum execution time: 5_685_000 picoseconds. + Weight::from_parts(5_816_000, 0) + .saturating_add(Weight::from_parts(0, 1588)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::Queries` (r:1 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn take_response() -> Weight { + // Proof Size summary in bytes: + // Measured: `7740` + // Estimated: `11205` + // Minimum execution time: 31_271_000 picoseconds. + Weight::from_parts(32_195_000, 0) + .saturating_add(Weight::from_parts(0, 11205)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PolkadotXcm::ShouldRecordXcm` (r:1 w:0) + /// Proof: `PolkadotXcm::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) + /// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn claim_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `160` + // Estimated: `3625` + // Minimum execution time: 43_530_000 picoseconds. 
+ Weight::from_parts(44_942_000, 0) + .saturating_add(Weight::from_parts(0, 3625)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn add_authorized_alias() -> Weight { + Weight::from_parts(100_000, 0) + } + fn remove_authorized_alias() -> Weight { + Weight::from_parts(100_000, 0) + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_xcm_bridge_hub_router.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_xcm_bridge_hub_router.rs new file mode 100644 index 0000000000000..a7ce8a64c22b6 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/pallet_xcm_bridge_hub_router.rs @@ -0,0 +1,92 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_xcm_bridge_hub_router` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-12-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-acd6uxux-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-next-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm_bridge_hub_router +// --chain=asset-hub-next-westend-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-next-westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_xcm_bridge_hub_router`. +pub struct WeightInfo(PhantomData); +impl pallet_xcm_bridge_hub_router::WeightInfo for WeightInfo { + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: Some(4002), added: 4497, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) + /// Storage: `ToRococoXcmRouter::Bridge` (r:1 w:1) + /// Proof: `ToRococoXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) + fn on_initialize_when_non_congested() -> Weight { + // Proof Size summary in bytes: + // Measured: `259` + // Estimated: `5487` + // Minimum execution time: 14_643_000 picoseconds. 
+ Weight::from_parts(14_992_000, 0) + .saturating_add(Weight::from_parts(0, 5487)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: Some(4002), added: 4497, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: Some(1282), added: 1777, mode: `MaxEncodedLen`) + fn on_initialize_when_congested() -> Weight { + // Proof Size summary in bytes: + // Measured: `144` + // Estimated: `5487` + // Minimum execution time: 5_367_000 picoseconds. + Weight::from_parts(5_604_000, 0) + .saturating_add(Weight::from_parts(0, 5487)) + .saturating_add(T::DbWeight::get().reads(2)) + } + /// Storage: `ToRococoXcmRouter::Bridge` (r:1 w:1) + /// Proof: `ToRococoXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) + fn report_bridge_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `150` + // Estimated: `1502` + // Minimum execution time: 12_562_000 picoseconds. + Weight::from_parts(12_991_000, 0) + .saturating_add(Weight::from_parts(0, 1502)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/paritydb_weights.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/paritydb_weights.rs new file mode 100644 index 0000000000000..25679703831a1 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/paritydb_weights.rs @@ -0,0 +1,63 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod constants { + use frame_support::{ + parameter_types, + weights::{constants, RuntimeDbWeight}, + }; + + parameter_types! { + /// `ParityDB` can be enabled with a feature flag, but is still experimental. These weights + /// are available for brave runtime engineers who may want to try this out as default. + pub const ParityDbWeight: RuntimeDbWeight = RuntimeDbWeight { + read: 8_000 * constants::WEIGHT_REF_TIME_PER_NANOS, + write: 50_000 * constants::WEIGHT_REF_TIME_PER_NANOS, + }; + } + + #[cfg(test)] + mod test_db_weights { + use super::constants::ParityDbWeight as W; + use frame_support::weights::constants; + + /// Checks that all weights exist and have sane values. + // NOTE: If this test fails but you are sure that the generated values are fine, + // you can delete it. + #[test] + fn sane() { + // At least 1 µs. + assert!( + W::get().reads(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, + "Read weight should be at least 1 µs." + ); + assert!( + W::get().writes(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, + "Write weight should be at least 1 µs." + ); + // At most 1 ms. + assert!( + W::get().reads(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, + "Read weight should be at most 1 ms." + ); + assert!( + W::get().writes(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, + "Write weight should be at most 1 ms." 
+ ); + } + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/rocksdb_weights.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/rocksdb_weights.rs new file mode 100644 index 0000000000000..3dd817aa6f137 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/rocksdb_weights.rs @@ -0,0 +1,63 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod constants { + use frame_support::{ + parameter_types, + weights::{constants, RuntimeDbWeight}, + }; + + parameter_types! { + /// By default, Substrate uses `RocksDB`, so this will be the weight used throughout + /// the runtime. + pub const RocksDbWeight: RuntimeDbWeight = RuntimeDbWeight { + read: 25_000 * constants::WEIGHT_REF_TIME_PER_NANOS, + write: 100_000 * constants::WEIGHT_REF_TIME_PER_NANOS, + }; + } + + #[cfg(test)] + mod test_db_weights { + use super::constants::RocksDbWeight as W; + use frame_support::weights::constants; + + /// Checks that all weights exist and have sane values. + // NOTE: If this test fails but you are sure that the generated values are fine, + // you can delete it. + #[test] + fn sane() { + // At least 1 µs. + assert!( + W::get().reads(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, + "Read weight should be at least 1 µs." 
+ ); + assert!( + W::get().writes(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, + "Write weight should be at least 1 µs." + ); + // At most 1 ms. + assert!( + W::get().reads(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, + "Read weight should be at most 1 ms." + ); + assert!( + W::get().writes(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, + "Write weight should be at most 1 ms." + ); + } + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/xcm/mod.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/xcm/mod.rs new file mode 100644 index 0000000000000..c83b6b91f9054 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/xcm/mod.rs @@ -0,0 +1,275 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +mod pallet_xcm_benchmarks_fungible; +mod pallet_xcm_benchmarks_generic; + +use crate::{xcm_config::MaxAssetsIntoHolding, Runtime}; +use alloc::vec::Vec; +use frame_support::weights::Weight; +use pallet_xcm_benchmarks_fungible::WeightInfo as XcmFungibleWeight; +use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric; +use sp_runtime::BoundedVec; +use xcm::{ + latest::{prelude::*, AssetTransferFilter}, + DoubleEncoded, +}; + +trait WeighAssets { + fn weigh_assets(&self, weight: Weight) -> Weight; +} + +const MAX_ASSETS: u64 = 100; + +impl WeighAssets for AssetFilter { + fn weigh_assets(&self, weight: Weight) -> Weight { + match self { + Self::Definite(assets) => weight.saturating_mul(assets.inner().iter().count() as u64), + Self::Wild(asset) => match asset { + All => weight.saturating_mul(MAX_ASSETS), + AllOf { fun, .. } => match fun { + WildFungibility::Fungible => weight, + // Magic number 2 has to do with the fact that we could have up to 2 times + // MaxAssetsIntoHolding in the worst-case scenario. + WildFungibility::NonFungible => + weight.saturating_mul((MaxAssetsIntoHolding::get() * 2) as u64), + }, + AllCounted(count) => weight.saturating_mul(MAX_ASSETS.min(*count as u64)), + AllOfCounted { count, .. 
} => weight.saturating_mul(MAX_ASSETS.min(*count as u64)), + }, + } + } +} + +impl WeighAssets for Assets { + fn weigh_assets(&self, weight: Weight) -> Weight { + weight.saturating_mul(self.inner().iter().count() as u64) + } +} + +pub struct AssetHubNextWestendXcmWeight(core::marker::PhantomData); +impl XcmWeightInfo for AssetHubNextWestendXcmWeight { + fn withdraw_asset(assets: &Assets) -> Weight { + assets.weigh_assets(XcmFungibleWeight::::withdraw_asset()) + } + fn reserve_asset_deposited(assets: &Assets) -> Weight { + assets.weigh_assets(XcmFungibleWeight::::reserve_asset_deposited()) + } + fn receive_teleported_asset(assets: &Assets) -> Weight { + assets.weigh_assets(XcmFungibleWeight::::receive_teleported_asset()) + } + fn query_response( + _query_id: &u64, + _response: &Response, + _max_weight: &Weight, + _querier: &Option, + ) -> Weight { + XcmGeneric::::query_response() + } + fn transfer_asset(assets: &Assets, _dest: &Location) -> Weight { + assets.weigh_assets(XcmFungibleWeight::::transfer_asset()) + } + fn transfer_reserve_asset(assets: &Assets, _dest: &Location, _xcm: &Xcm<()>) -> Weight { + assets.weigh_assets(XcmFungibleWeight::::transfer_reserve_asset()) + } + fn transact( + _origin_type: &OriginKind, + _fallback_max_weight: &Option, + _call: &DoubleEncoded, + ) -> Weight { + XcmGeneric::::transact() + } + fn hrmp_new_channel_open_request( + _sender: &u32, + _max_message_size: &u32, + _max_capacity: &u32, + ) -> Weight { + // XCM Executor does not currently support HRMP channel operations + Weight::MAX + } + fn hrmp_channel_accepted(_recipient: &u32) -> Weight { + // XCM Executor does not currently support HRMP channel operations + Weight::MAX + } + fn hrmp_channel_closing(_initiator: &u32, _sender: &u32, _recipient: &u32) -> Weight { + // XCM Executor does not currently support HRMP channel operations + Weight::MAX + } + fn clear_origin() -> Weight { + XcmGeneric::::clear_origin() + } + fn descend_origin(_who: &InteriorLocation) -> Weight { + 
XcmGeneric::::descend_origin() + } + fn report_error(_query_response_info: &QueryResponseInfo) -> Weight { + XcmGeneric::::report_error() + } + + fn deposit_asset(assets: &AssetFilter, _dest: &Location) -> Weight { + assets.weigh_assets(XcmFungibleWeight::::deposit_asset()) + } + fn deposit_reserve_asset(assets: &AssetFilter, _dest: &Location, _xcm: &Xcm<()>) -> Weight { + assets.weigh_assets(XcmFungibleWeight::::deposit_reserve_asset()) + } + fn exchange_asset(_give: &AssetFilter, _receive: &Assets, _maximal: &bool) -> Weight { + Weight::MAX + } + fn initiate_reserve_withdraw( + assets: &AssetFilter, + _reserve: &Location, + _xcm: &Xcm<()>, + ) -> Weight { + assets.weigh_assets(XcmFungibleWeight::::initiate_reserve_withdraw()) + } + fn initiate_teleport(assets: &AssetFilter, _dest: &Location, _xcm: &Xcm<()>) -> Weight { + assets.weigh_assets(XcmFungibleWeight::::initiate_teleport()) + } + fn initiate_transfer( + _dest: &Location, + remote_fees: &Option, + _preserve_origin: &bool, + assets: &BoundedVec, + _xcm: &Xcm<()>, + ) -> Weight { + let base_weight = XcmFungibleWeight::::initiate_transfer(); + let mut weight = if let Some(remote_fees) = remote_fees { + let fees = remote_fees.inner(); + fees.weigh_assets(base_weight) + } else { + base_weight + }; + for asset_filter in assets { + let assets = asset_filter.inner(); + let extra = assets.weigh_assets(XcmFungibleWeight::::initiate_transfer()); + weight = weight.saturating_add(extra); + } + weight + } + fn report_holding(_response_info: &QueryResponseInfo, _assets: &AssetFilter) -> Weight { + XcmGeneric::::report_holding() + } + fn buy_execution(_fees: &Asset, _weight_limit: &WeightLimit) -> Weight { + XcmGeneric::::buy_execution() + } + fn pay_fees(_asset: &Asset) -> Weight { + XcmGeneric::::pay_fees() + } + fn refund_surplus() -> Weight { + XcmGeneric::::refund_surplus() + } + fn set_error_handler(_xcm: &Xcm) -> Weight { + XcmGeneric::::set_error_handler() + } + fn set_appendix(_xcm: &Xcm) -> Weight { + 
XcmGeneric::::set_appendix() + } + fn clear_error() -> Weight { + XcmGeneric::::clear_error() + } + fn set_hints(hints: &BoundedVec) -> Weight { + let mut weight = Weight::zero(); + for hint in hints { + match hint { + AssetClaimer { .. } => { + weight = weight.saturating_add(XcmGeneric::::asset_claimer()); + }, + } + } + weight + } + fn claim_asset(_assets: &Assets, _ticket: &Location) -> Weight { + XcmGeneric::::claim_asset() + } + fn trap(_code: &u64) -> Weight { + XcmGeneric::::trap() + } + fn subscribe_version(_query_id: &QueryId, _max_response_weight: &Weight) -> Weight { + XcmGeneric::::subscribe_version() + } + fn unsubscribe_version() -> Weight { + XcmGeneric::::unsubscribe_version() + } + fn burn_asset(assets: &Assets) -> Weight { + assets.weigh_assets(XcmGeneric::::burn_asset()) + } + fn expect_asset(assets: &Assets) -> Weight { + assets.weigh_assets(XcmGeneric::::expect_asset()) + } + fn expect_origin(_origin: &Option) -> Weight { + XcmGeneric::::expect_origin() + } + fn expect_error(_error: &Option<(u32, XcmError)>) -> Weight { + XcmGeneric::::expect_error() + } + fn expect_transact_status(_transact_status: &MaybeErrorCode) -> Weight { + XcmGeneric::::expect_transact_status() + } + fn query_pallet(_module_name: &Vec, _response_info: &QueryResponseInfo) -> Weight { + XcmGeneric::::query_pallet() + } + fn expect_pallet( + _index: &u32, + _name: &Vec, + _module_name: &Vec, + _crate_major: &u32, + _min_crate_minor: &u32, + ) -> Weight { + XcmGeneric::::expect_pallet() + } + fn report_transact_status(_response_info: &QueryResponseInfo) -> Weight { + XcmGeneric::::report_transact_status() + } + fn clear_transact_status() -> Weight { + XcmGeneric::::clear_transact_status() + } + fn universal_origin(_: &Junction) -> Weight { + XcmGeneric::::universal_origin() + } + fn export_message(_: &NetworkId, _: &Junctions, _: &Xcm<()>) -> Weight { + Weight::MAX + } + fn lock_asset(_: &Asset, _: &Location) -> Weight { + Weight::MAX + } + fn unlock_asset(_: &Asset, _: 
&Location) -> Weight { + Weight::MAX + } + fn note_unlockable(_: &Asset, _: &Location) -> Weight { + Weight::MAX + } + fn request_unlock(_: &Asset, _: &Location) -> Weight { + Weight::MAX + } + fn set_fees_mode(_: &bool) -> Weight { + XcmGeneric::::set_fees_mode() + } + fn set_topic(_topic: &[u8; 32]) -> Weight { + XcmGeneric::::set_topic() + } + fn clear_topic() -> Weight { + XcmGeneric::::clear_topic() + } + fn alias_origin(_: &Location) -> Weight { + XcmGeneric::::alias_origin() + } + fn unpaid_execution(_: &WeightLimit, _: &Option) -> Weight { + XcmGeneric::::unpaid_execution() + } + fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { + XcmGeneric::::execute_with_origin() + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs new file mode 100644 index 0000000000000..99da636e8afa8 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs @@ -0,0 +1,224 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_xcm_benchmarks::fungible` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! 
DATE: 2024-10-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-wmcgzesc-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: Compiled, CHAIN: Some("asset-hub-next-westend-dev"), DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm_benchmarks::fungible +// --chain=asset-hub-next-westend-dev +// --header=./cumulus/file_header.txt +// --template=./cumulus/templates/xcm-bench-template.hbs +// --output=./cumulus/parachains/runtimes/assets/asset-hub-next-westend/src/weights/xcm/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weights for `pallet_xcm_benchmarks::fungible`. +pub struct WeightInfo(PhantomData); +impl WeightInfo { + // Storage: `System::Account` (r:1 w:1) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + pub fn withdraw_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `101` + // Estimated: `3593` + // Minimum execution time: 32_698_000 picoseconds. + Weight::from_parts(33_530_000, 3593) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + pub fn transfer_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `101` + // Estimated: `6196` + // Minimum execution time: 41_485_000 picoseconds. 
+ Weight::from_parts(41_963_000, 6196) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + // Storage: `System::Account` (r:3 w:3) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + pub fn transfer_reserve_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `246` + // Estimated: `8799` + // Minimum execution time: 104_952_000 picoseconds. + Weight::from_parts(108_211_000, 8799) + .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().writes(5)) + } + pub fn reserve_asset_deposited() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_154_000 picoseconds. 
+ Weight::from_parts(1_238_000, 0) + } + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + pub fn initiate_reserve_withdraw() -> Weight { + // Proof Size summary in bytes: + // Measured: `246` + // Estimated: `6196` + // Minimum execution time: 111_509_000 picoseconds. + Weight::from_parts(114_476_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(4)) + } + pub fn receive_teleported_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_572_000 picoseconds. 
+ Weight::from_parts(2_809_000, 0) + } + // Storage: `System::Account` (r:1 w:1) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + pub fn deposit_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `3593` + // Minimum execution time: 25_570_000 picoseconds. + Weight::from_parts(25_933_000, 3593) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + pub fn deposit_reserve_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `145` + // Estimated: `6196` + // Minimum execution time: 86_148_000 
picoseconds. + Weight::from_parts(88_170_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(4)) + } + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `System::Account` (r:1 w:1) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + pub fn initiate_teleport() -> Weight { + // Proof Size summary in bytes: + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 55_051_000 picoseconds. 
+ Weight::from_parts(56_324_000, 3610) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(3)) + } + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + pub fn initiate_transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `145` + // Estimated: `6196` + // Minimum execution time: 90_155_000 picoseconds. 
+ Weight::from_parts(91_699_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(4)) + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/substrate/frame/staking-async/runtimes/parachain/src/weights/xcm/pallet_xcm_benchmarks_generic.rs new file mode 100644 index 0000000000000..b9290d602cda2 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -0,0 +1,384 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_xcm_benchmarks::generic` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-10-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-wmcgzesc-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: Compiled, CHAIN: Some("asset-hub-next-westend-dev"), DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm_benchmarks::generic +// --chain=asset-hub-next-westend-dev +// --header=./cumulus/file_header.txt +// --template=./cumulus/templates/xcm-bench-template.hbs +// --output=./cumulus/parachains/runtimes/assets/asset-hub-next-westend/src/weights/xcm/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weights for `pallet_xcm_benchmarks::generic`. +pub struct WeightInfo(PhantomData); +impl WeightInfo { + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: 
`ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + pub fn report_holding() -> Weight { + // Proof Size summary in bytes: + // Measured: `246` + // Estimated: `6196` + // Minimum execution time: 103_794_000 picoseconds. + Weight::from_parts(106_697_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(4)) + } + pub fn buy_execution() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 621_000 picoseconds. + Weight::from_parts(705_000, 0) + } + pub fn pay_fees() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 5_580_000 picoseconds. + Weight::from_parts(5_950_000, 0) + } + pub fn asset_claimer() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 598_000 picoseconds. + Weight::from_parts(700_000, 0) + } + // Storage: `PolkadotXcm::Queries` (r:1 w:0) + // Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + pub fn query_response() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `3568` + // Minimum execution time: 8_186_000 picoseconds. + Weight::from_parts(8_753_000, 3568) + .saturating_add(T::DbWeight::get().reads(1)) + } + pub fn transact() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 6_924_000 picoseconds. + Weight::from_parts(7_315_000, 0) + } + pub fn refund_surplus() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_731_000 picoseconds. 
+ Weight::from_parts(2_828_000, 0) + } + pub fn set_error_handler() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 655_000 picoseconds. + Weight::from_parts(723_000, 0) + } + pub fn set_appendix() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 648_000 picoseconds. + Weight::from_parts(730_000, 0) + } + pub fn clear_error() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 628_000 picoseconds. + Weight::from_parts(697_000, 0) + } + pub fn descend_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 714_000 picoseconds. + Weight::from_parts(775_000, 0) + } + pub fn clear_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 666_000 picoseconds. + Weight::from_parts(717_000, 0) + } + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: 
`MaxEncodedLen`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + pub fn report_error() -> Weight { + // Proof Size summary in bytes: + // Measured: `246` + // Estimated: `6196` + // Minimum execution time: 70_263_000 picoseconds. + Weight::from_parts(71_266_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(4)) + } + // Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) + // Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) + pub fn claim_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `160` + // Estimated: `3625` + // Minimum execution time: 13_079_000 picoseconds. + Weight::from_parts(13_569_000, 3625) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + pub fn trap() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 630_000 picoseconds. 
+ Weight::from_parts(710_000, 0) + } + // Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1) + // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + pub fn subscribe_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 29_042_000 picoseconds. + Weight::from_parts(29_633_000, 3610) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(3)) + } + // Storage: `PolkadotXcm::VersionNotifyTargets` (r:0 w:1) + // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + pub fn unsubscribe_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_601_000 picoseconds. 
+ Weight::from_parts(2_855_000, 0) + .saturating_add(T::DbWeight::get().writes(1)) + } + pub fn burn_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 23_696_000 picoseconds. + Weight::from_parts(24_427_000, 0) + } + pub fn expect_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 6_687_000 picoseconds. + Weight::from_parts(6_820_000, 0) + } + pub fn expect_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 653_000 picoseconds. + Weight::from_parts(728_000, 0) + } + pub fn expect_error() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 668_000 picoseconds. + Weight::from_parts(721_000, 0) + } + pub fn expect_transact_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 832_000 picoseconds. 
+ Weight::from_parts(900_000, 0) + } + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + pub fn query_pallet() -> Weight { + // Proof Size summary in bytes: + // Measured: `246` + // Estimated: `6196` + // Minimum execution time: 75_131_000 picoseconds. + Weight::from_parts(77_142_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(4)) + } + pub fn expect_pallet() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 4_820_000 picoseconds. 
+ Weight::from_parts(5_089_000, 0) + } + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + // Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + pub fn report_transact_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `246` + // Estimated: `6196` + // Minimum execution time: 70_079_000 picoseconds. + Weight::from_parts(71_762_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(4)) + } + pub fn clear_transact_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 722_000 picoseconds. 
+ Weight::from_parts(784_000, 0) + } + pub fn set_topic() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 613_000 picoseconds. + Weight::from_parts(674_000, 0) + } + pub fn clear_topic() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 608_000 picoseconds. + Weight::from_parts(683_000, 0) + } + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + pub fn universal_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `1489` + // Minimum execution time: 2_466_000 picoseconds. + Weight::from_parts(2_705_000, 1489) + .saturating_add(T::DbWeight::get().reads(1)) + } + pub fn set_fees_mode() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 623_000 picoseconds. + Weight::from_parts(687_000, 0) + } + pub fn unpaid_execution() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 673_000 picoseconds. + Weight::from_parts(752_000, 0) + } + pub fn alias_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 638_000 picoseconds. + Weight::from_parts(708_000, 0) + } + pub fn execute_with_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 713_000 picoseconds. 
+ Weight::from_parts(776_000, 0) + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/src/xcm_config.rs b/substrate/frame/staking-async/runtimes/parachain/src/xcm_config.rs new file mode 100644 index 0000000000000..bd301ac0e15a8 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/src/xcm_config.rs @@ -0,0 +1,792 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use super::{ + AccountId, AllPalletsWithSystem, Assets, Authorship, Balance, Balances, BaseDeliveryFee, + CollatorSelection, FeeAssetId, FellowshipAdmin, ForeignAssets, ForeignAssetsInstance, + GeneralAdmin, ParachainInfo, ParachainSystem, PolkadotXcm, PoolAssets, Runtime, RuntimeCall, + RuntimeEvent, RuntimeOrigin, StakingAdmin, ToRococoXcmRouter, TransactionByteFee, Treasurer, + TrustBackedAssetsInstance, Uniques, WeightToFee, XcmpQueue, +}; +use assets_common::{ + matching::{FromSiblingParachain, IsForeignConcreteAsset, ParentLocation}, + TrustBackedAssetsAsLocation, +}; +use frame_support::{ + parameter_types, + traits::{ + fungible::HoldConsideration, + tokens::imbalance::{ResolveAssetTo, ResolveTo}, + ConstU32, Contains, Equals, Everything, LinearStoragePrice, PalletInfoAccess, + }, +}; +use frame_system::EnsureRoot; +use pallet_xcm::XcmPassthrough; +use parachains_common::{ + xcm_config::{ + AllSiblingSystemParachains, AssetFeeAsExistentialDepositMultiplier, + ConcreteAssetFromSystem, RelayOrOtherSystemParachains, + }, + TREASURY_PALLET_ID, +}; +use polkadot_parachain_primitives::primitives::Sibling; +use polkadot_runtime_common::xcm_sender::ExponentialPrice; +use sp_runtime::traits::{AccountIdConversion, ConvertInto, TryConvertInto}; +use westend_runtime_constants::{ + system_parachain::COLLECTIVES_ID, xcm::body::FELLOWSHIP_ADMIN_INDEX, +}; +use xcm::latest::{prelude::*, ROCOCO_GENESIS_HASH, WESTEND_GENESIS_HASH}; +use xcm_builder::{ + AccountId32Aliases, AliasChildLocation, AllowExplicitUnpaidExecutionFrom, + AllowHrmpNotificationsFromRelayChain, AllowKnownQueryResponses, AllowSubscriptionsFrom, + AllowTopLevelPaidExecutionFrom, DenyReserveTransferToRelayChain, DenyThenTry, + DescribeAllTerminal, DescribeFamily, EnsureXcmOrigin, FrameTransactionalProcessor, + FungibleAdapter, FungiblesAdapter, GlobalConsensusParachainConvertsFor, HashedDescription, + IsConcrete, LocalMint, MatchedConvertedConcreteId, NetworkExportTableItem, NoChecking, + 
NonFungiblesAdapter, OriginToPluralityVoice, ParentAsSuperuser, ParentIsPreset, + RelayChainAsNative, SendXcmFeeToAccount, SiblingParachainAsNative, SiblingParachainConvertsVia, + SignedAccountId32AsNative, SignedToAccountId32, SingleAssetExchangeAdapter, + SovereignPaidRemoteExporter, SovereignSignedViaLocation, StartsWith, + StartsWithExplicitGlobalConsensus, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, + WeightInfoBounds, WithComputedOrigin, WithLatestLocationConverter, WithUniqueTopic, + XcmFeeManagerFromComponents, +}; +use xcm_executor::XcmExecutor; + +parameter_types! { + pub const RootLocation: Location = Location::here(); + pub const WestendLocation: Location = Location::parent(); + pub const RelayNetwork: Option = Some(NetworkId::ByGenesis(WESTEND_GENESIS_HASH)); + pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); + pub UniversalLocation: InteriorLocation = + [GlobalConsensus(RelayNetwork::get().unwrap()), Parachain(ParachainInfo::parachain_id().into())].into(); + pub UniversalLocationNetworkId: NetworkId = UniversalLocation::get().global_consensus().unwrap(); + pub TrustBackedAssetsPalletLocation: Location = + PalletInstance(TrustBackedAssetsPalletIndex::get()).into(); + pub TrustBackedAssetsPalletIndex: u8 = ::index() as u8; + pub ForeignAssetsPalletLocation: Location = + PalletInstance(::index() as u8).into(); + pub PoolAssetsPalletLocation: Location = + PalletInstance(::index() as u8).into(); + pub UniquesPalletLocation: Location = + PalletInstance(::index() as u8).into(); + pub CheckingAccount: AccountId = PolkadotXcm::check_account(); + pub StakingPot: AccountId = CollatorSelection::account_id(); + pub TreasuryAccount: AccountId = TREASURY_PALLET_ID.into_account_truncating(); + pub RelayTreasuryLocation: Location = (Parent, PalletInstance(westend_runtime_constants::TREASURY_PALLET_ID)).into(); +} + +/// Type for specifying how a `Location` can be converted into an `AccountId`. 
This is used +/// when determining ownership of accounts for asset transacting and when attempting to use XCM +/// `Transact` in order to determine the dispatch Origin. +pub type LocationToAccountId = ( + // The parent (Relay-chain) origin converts to the parent `AccountId`. + ParentIsPreset, + // Sibling parachain origins convert to AccountId via the `ParaId::into`. + SiblingParachainConvertsVia, + // Straight up local `AccountId32` origins just alias directly to `AccountId`. + AccountId32Aliases, + // Foreign locations alias into accounts according to a hash of their standard description. + HashedDescription>, + // Different global consensus parachain sovereign account. + // (Used for over-bridge transfers and reserve processing) + GlobalConsensusParachainConvertsFor, +); + +/// Means for transacting the native currency on this chain. +pub type FungibleTransactor = FungibleAdapter< + // Use this currency: + Balances, + // Use this currency when it is a fungible asset matching the given location or name: + IsConcrete, + // Convert an XCM Location into a local account id: + LocationToAccountId, + // Our chain's account ID type (we can't get away without mentioning it explicitly): + AccountId, + // We don't track any teleports of `Balances`. + (), +>; + +/// `AssetId`/`Balance` converter for `TrustBackedAssets`. +pub type TrustBackedAssetsConvertedConcreteId = + assets_common::TrustBackedAssetsConvertedConcreteId; + +/// Means for transacting assets besides the native currency on this chain. +pub type FungiblesTransactor = FungiblesAdapter< + // Use this fungibles implementation: + Assets, + // Use this currency when it is a fungible asset matching the given location or name: + TrustBackedAssetsConvertedConcreteId, + // Convert an XCM Location into a local account id: + LocationToAccountId, + // Our chain's account ID type (we can't get away without mentioning it explicitly): + AccountId, + // We only want to allow teleports of known assets. 
We use non-zero issuance as an indication + // that this asset is known. + LocalMint>, + // The account to use for tracking teleports. + CheckingAccount, +>; + +/// Matcher for converting `ClassId`/`InstanceId` into a uniques asset. +pub type UniquesConvertedConcreteId = + assets_common::UniquesConvertedConcreteId; + +/// Means for transacting unique assets. +pub type UniquesTransactor = NonFungiblesAdapter< + // Use this non-fungibles implementation: + Uniques, + // This adapter will handle any non-fungible asset from the uniques pallet. + UniquesConvertedConcreteId, + // Convert an XCM Location into a local account id: + LocationToAccountId, + // Our chain's account ID type (we can't get away without mentioning it explicitly): + AccountId, + // Does not check teleports. + NoChecking, + // The account to use for tracking teleports. + CheckingAccount, +>; + +/// `AssetId`/`Balance` converter for `ForeignAssets`. +pub type ForeignAssetsConvertedConcreteId = assets_common::ForeignAssetsConvertedConcreteId< + ( + // Ignore `TrustBackedAssets` explicitly + StartsWith, + // Ignore asset which starts explicitly with our `GlobalConsensus(NetworkId)`, means: + // - foreign assets from our consensus should be: `Location {parents: 1, X*(Parachain(xyz), + // ..)} + // - foreign assets outside our consensus with the same `GlobalConsensus(NetworkId)` wont + // be accepted here + StartsWithExplicitGlobalConsensus, + ), + Balance, + xcm::v5::Location, +>; + +/// Means for transacting foreign assets from different global consensus. 
+pub type ForeignFungiblesTransactor = FungiblesAdapter< + // Use this fungibles implementation: + ForeignAssets, + // Use this currency when it is a fungible asset matching the given location or name: + ForeignAssetsConvertedConcreteId, + // Convert an XCM Location into a local account id: + LocationToAccountId, + // Our chain's account ID type (we can't get away without mentioning it explicitly): + AccountId, + // We don't need to check teleports here. + NoChecking, + // The account to use for tracking teleports. + CheckingAccount, +>; + +/// `AssetId`/`Balance` converter for `PoolAssets`. +pub type PoolAssetsConvertedConcreteId = + assets_common::PoolAssetsConvertedConcreteId; + +/// Means for transacting asset conversion pool assets on this chain. +pub type PoolFungiblesTransactor = FungiblesAdapter< + // Use this fungibles implementation: + PoolAssets, + // Use this currency when it is a fungible asset matching the given location or name: + PoolAssetsConvertedConcreteId, + // Convert an XCM Location into a local account id: + LocationToAccountId, + // Our chain's account ID type (we can't get away without mentioning it explicitly): + AccountId, + // We only want to allow teleports of known assets. We use non-zero issuance as an indication + // that this asset is known. + LocalMint>, + // The account to use for tracking teleports. + CheckingAccount, +>; + +/// Means for transacting assets on this chain. +pub type AssetTransactors = ( + FungibleTransactor, + FungiblesTransactor, + ForeignFungiblesTransactor, + PoolFungiblesTransactor, + UniquesTransactor, +); + +/// This is the type we use to convert an (incoming) XCM origin into a local `Origin` instance, +/// ready for dispatching a transaction with Xcm's `Transact`. There is an `OriginKind` which can +/// biases the kind of local `Origin` it will become. 
+pub type XcmOriginToTransactDispatchOrigin = ( + // Sovereign account converter; this attempts to derive an `AccountId` from the origin location + // using `LocationToAccountId` and then turn that into the usual `Signed` origin. Useful for + // foreign chains who want to have a local sovereign account on this chain which they control. + SovereignSignedViaLocation, + // Native converter for Relay-chain (Parent) location; will convert to a `Relay` origin when + // recognised. + RelayChainAsNative, + // Native converter for sibling Parachains; will convert to a `SiblingPara` origin when + // recognised. + SiblingParachainAsNative, + // Superuser converter for the Relay-chain (Parent) location. This will allow it to issue a + // transaction from the Root origin. + ParentAsSuperuser, + // Native signed account converter; this just converts an `AccountId32` origin into a normal + // `RuntimeOrigin::Signed` origin of the same 32-byte value. + SignedAccountId32AsNative, + // Xcm origins can be represented natively under the Xcm pallet's Xcm origin. + XcmPassthrough, +); + +parameter_types! { + pub const MaxInstructions: u32 = 100; + pub const MaxAssetsIntoHolding: u32 = 64; + pub XcmAssetFeesReceiver: Option = Authorship::author(); +} + +pub struct ParentOrParentsPlurality; +impl Contains for ParentOrParentsPlurality { + fn contains(location: &Location) -> bool { + matches!(location.unpack(), (1, []) | (1, [Plurality { .. }])) + } +} + +pub struct FellowshipEntities; +impl Contains for FellowshipEntities { + fn contains(location: &Location) -> bool { + matches!( + location.unpack(), + (1, [Parachain(COLLECTIVES_ID), Plurality { id: BodyId::Technical, .. }]) | + (1, [Parachain(COLLECTIVES_ID), PalletInstance(64)]) | + (1, [Parachain(COLLECTIVES_ID), PalletInstance(65)]) + ) + } +} + +pub struct LocalPlurality; +impl Contains for LocalPlurality { + fn contains(loc: &Location) -> bool { + matches!(loc.unpack(), (0, [Plurality { .. 
}])) + } +} + +pub struct AmbassadorEntities; +impl Contains for AmbassadorEntities { + fn contains(location: &Location) -> bool { + matches!(location.unpack(), (1, [Parachain(COLLECTIVES_ID), PalletInstance(74)])) + } +} + +pub type Barrier = TrailingSetTopicAsId< + DenyThenTry< + DenyReserveTransferToRelayChain, + ( + TakeWeightCredit, + // Expected responses are OK. + AllowKnownQueryResponses, + // Allow XCMs with some computed origins to pass through. + WithComputedOrigin< + ( + // If the message is one that immediately attempts to pay for execution, then + // allow it. + AllowTopLevelPaidExecutionFrom, + // Parent, its pluralities (i.e. governance bodies), relay treasury pallet and + // sibling parachains get free execution. + AllowExplicitUnpaidExecutionFrom<( + ParentOrParentsPlurality, + Equals, + RelayOrOtherSystemParachains, + FellowshipEntities, + AmbassadorEntities, + )>, + // Subscriptions for version tracking are OK. + AllowSubscriptionsFrom, + // HRMP notifications from the relay chain are OK. + AllowHrmpNotificationsFromRelayChain, + ), + UniversalLocation, + ConstU32<8>, + >, + ), + >, +>; + +// TODO: This calls into the Assets pallet's default `BalanceToAssetBalance` implementation, which +// uses the ratio of minimum balances and requires asset sufficiency. This means that purchasing +// weight within XCM programs will still use the old way, and paying fees via asset conversion will +// only be possible when transacting locally. We should add an impl of this trait that does asset +// conversion. +pub type AssetFeeAsExistentialDepositMultiplierFeeCharger = AssetFeeAsExistentialDepositMultiplier< + Runtime, + WeightToFee, + pallet_assets::BalanceToAssetBalance, + TrustBackedAssetsInstance, +>; + +/// Multiplier used for dedicated `TakeFirstAssetTrader` with `ForeignAssets` instance. 
+pub type ForeignAssetFeeAsExistentialDepositMultiplierFeeCharger = + AssetFeeAsExistentialDepositMultiplier< + Runtime, + WeightToFee, + pallet_assets::BalanceToAssetBalance, + ForeignAssetsInstance, + >; + +/// Locations that will not be charged fees in the executor, +/// either execution or delivery. +/// We only waive fees for system functions, which these locations represent. +pub type WaivedLocations = ( + Equals, + RelayOrOtherSystemParachains, + Equals, + FellowshipEntities, + AmbassadorEntities, + LocalPlurality, +); + +/// Cases where a remote origin is accepted as trusted Teleporter for a given asset: +/// +/// - WND with the parent Relay Chain and sibling system parachains; and +/// - Sibling parachains' assets from where they originate (as `ForeignCreators`). +pub type TrustedTeleporters = ( + ConcreteAssetFromSystem, + IsForeignConcreteAsset>>, +); + +/// Asset converter for pool assets. +/// Used to convert one asset to another, when there is a pool available between the two. +/// This type thus allows paying fees with any asset as long as there is a pool between said +/// asset and the asset required for fee payment. +pub type PoolAssetsExchanger = SingleAssetExchangeAdapter< + crate::AssetConversion, + crate::NativeAndNonPoolAssets, + ( + TrustBackedAssetsAsLocation, + ForeignAssetsConvertedConcreteId, + // `ForeignAssetsConvertedConcreteId` excludes the relay token, so we add it back here. 
+ MatchedConvertedConcreteId< + xcm::v5::Location, + Balance, + Equals, + WithLatestLocationConverter, + TryConvertInto, + >, + ), + AccountId, +>; + +pub struct XcmConfig; +impl xcm_executor::Config for XcmConfig { + type XcmEventEmitter = PolkadotXcm; + type RuntimeCall = RuntimeCall; + type XcmSender = XcmRouter; + type AssetTransactor = AssetTransactors; + type OriginConverter = XcmOriginToTransactDispatchOrigin; + // Asset Hub trusts only particular, pre-configured bridged locations from a different consensus + // as reserve locations (we trust the Bridge Hub to relay the message that a reserve is being + // held). On Westend Asset Hub, we allow Rococo Asset Hub to act as reserve for any asset native + // to the Rococo or Ethereum ecosystems. + type IsReserve = ( + bridging::to_rococo::RococoAssetFromAssetHubRococo, + bridging::to_ethereum::EthereumAssetFromEthereum, + ); + type IsTeleporter = TrustedTeleporters; + type UniversalLocation = UniversalLocation; + type Barrier = Barrier; + type Weigher = WeightInfoBounds< + crate::weights::xcm::AssetHubNextWestendXcmWeight, + RuntimeCall, + MaxInstructions, + >; + type Trader = ( + UsingComponents< + WeightToFee, + WestendLocation, + AccountId, + Balances, + ResolveTo, + >, + cumulus_primitives_utility::SwapFirstAssetTrader< + WestendLocation, + crate::AssetConversion, + WeightToFee, + crate::NativeAndNonPoolAssets, + ( + TrustBackedAssetsAsLocation< + TrustBackedAssetsPalletLocation, + Balance, + xcm::v5::Location, + >, + ForeignAssetsConvertedConcreteId, + ), + ResolveAssetTo, + AccountId, + >, + // This trader allows to pay with `is_sufficient=true` "Trust Backed" assets from dedicated + // `pallet_assets` instance - `Assets`. 
+ cumulus_primitives_utility::TakeFirstAssetTrader< + AccountId, + AssetFeeAsExistentialDepositMultiplierFeeCharger, + TrustBackedAssetsConvertedConcreteId, + Assets, + cumulus_primitives_utility::XcmFeesTo32ByteAccount< + FungiblesTransactor, + AccountId, + XcmAssetFeesReceiver, + >, + >, + // This trader allows to pay with `is_sufficient=true` "Foreign" assets from dedicated + // `pallet_assets` instance - `ForeignAssets`. + cumulus_primitives_utility::TakeFirstAssetTrader< + AccountId, + ForeignAssetFeeAsExistentialDepositMultiplierFeeCharger, + ForeignAssetsConvertedConcreteId, + ForeignAssets, + cumulus_primitives_utility::XcmFeesTo32ByteAccount< + ForeignFungiblesTransactor, + AccountId, + XcmAssetFeesReceiver, + >, + >, + ); + type ResponseHandler = PolkadotXcm; + type AssetTrap = PolkadotXcm; + type AssetClaims = PolkadotXcm; + type SubscriptionService = PolkadotXcm; + type PalletInstancesInfo = AllPalletsWithSystem; + type MaxAssetsIntoHolding = MaxAssetsIntoHolding; + type AssetLocker = (); + type AssetExchanger = PoolAssetsExchanger; + type FeeManager = XcmFeeManagerFromComponents< + WaivedLocations, + SendXcmFeeToAccount, + >; + type MessageExporter = (); + type UniversalAliases = + (bridging::to_rococo::UniversalAliases, bridging::to_ethereum::UniversalAliases); + type CallDispatcher = RuntimeCall; + type SafeCallFilter = Everything; + // We allow any origin to alias into a child sub-location (equivalent to DescendOrigin). + type Aliasers = AliasChildLocation; + type TransactionalProcessor = FrameTransactionalProcessor; + type HrmpNewChannelOpenRequestHandler = (); + type HrmpChannelAcceptedHandler = (); + type HrmpChannelClosingHandler = (); + type XcmRecorder = PolkadotXcm; +} + +parameter_types! { + // `GeneralAdmin` pluralistic body. + pub const GeneralAdminBodyId: BodyId = BodyId::Administration; + // StakingAdmin pluralistic body. + pub const StakingAdminBodyId: BodyId = BodyId::Defense; + // FellowshipAdmin pluralistic body. 
+ pub const FellowshipAdminBodyId: BodyId = BodyId::Index(FELLOWSHIP_ADMIN_INDEX); + // `Treasurer` pluralistic body. + pub const TreasurerBodyId: BodyId = BodyId::Treasury; +} + +/// Type to convert the `GeneralAdmin` origin to a Plurality `Location` value. +pub type GeneralAdminToPlurality = + OriginToPluralityVoice; + +/// Local origins on this chain are allowed to dispatch XCM sends/executions. +pub type LocalOriginToLocation = + (GeneralAdminToPlurality, SignedToAccountId32); + +/// Type to convert the `StakingAdmin` origin to a Plurality `Location` value. +pub type StakingAdminToPlurality = + OriginToPluralityVoice; + +/// Type to convert the `FellowshipAdmin` origin to a Plurality `Location` value. +pub type FellowshipAdminToPlurality = + OriginToPluralityVoice; + +/// Type to convert the `Treasurer` origin to a Plurality `Location` value. +pub type TreasurerToPlurality = OriginToPluralityVoice; + +/// Type to convert a pallet `Origin` type value into a `Location` value which represents an +/// interior location of this chain for a destination chain. +pub type LocalPalletOriginToLocation = ( + // GeneralAdmin origin to be used in XCM as a corresponding Plurality `Location` value. + GeneralAdminToPlurality, + // StakingAdmin origin to be used in XCM as a corresponding Plurality `Location` value. + StakingAdminToPlurality, + // FellowshipAdmin origin to be used in XCM as a corresponding Plurality `Location` value. + FellowshipAdminToPlurality, + // `Treasurer` origin to be used in XCM as a corresponding Plurality `Location` value. + TreasurerToPlurality, +); + +pub type PriceForParentDelivery = + ExponentialPrice; + +/// For routing XCM messages which do not cross local consensus boundary. +type LocalXcmRouter = ( + // Two routers - use UMP to communicate with the relay chain: + cumulus_primitives_utility::ParentAsUmp, + // ..and XCMP to communicate with the sibling chains. 
+ XcmpQueue, +); + +/// The means for routing XCM messages which are not for local execution into the right message +/// queues. +pub type XcmRouter = WithUniqueTopic<( + LocalXcmRouter, + // Router which wraps and sends xcm to BridgeHub to be delivered to the Rococo + // GlobalConsensus + ToRococoXcmRouter, + // Router which wraps and sends xcm to BridgeHub to be delivered to the Ethereum + // GlobalConsensus + SovereignPaidRemoteExporter< + bridging::to_ethereum::EthereumNetworkExportTable, + XcmpQueue, + UniversalLocation, + >, +)>; + +parameter_types! { + pub Collectives: Location = Parachain(COLLECTIVES_ID).into_location(); + + pub const DepositPerItem: Balance = crate::deposit(1, 0); + pub const DepositPerByte: Balance = crate::deposit(0, 1); + pub const AuthorizeAliasHoldReason: crate::RuntimeHoldReason = crate::RuntimeHoldReason::PolkadotXcm(pallet_xcm::HoldReason::AuthorizeAlias); +} + +impl pallet_xcm::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type SendXcmOrigin = EnsureXcmOrigin; + type XcmRouter = XcmRouter; + type ExecuteXcmOrigin = EnsureXcmOrigin; + type XcmExecuteFilter = Everything; + type XcmExecutor = XcmExecutor; + type XcmTeleportFilter = Everything; + type XcmReserveTransferFilter = Everything; + type Weigher = WeightInfoBounds< + crate::weights::xcm::AssetHubNextWestendXcmWeight, + RuntimeCall, + MaxInstructions, + >; + type UniversalLocation = UniversalLocation; + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 100; + type AdvertisedXcmVersion = pallet_xcm::CurrentXcmVersion; + type Currency = Balances; + type CurrencyMatcher = (); + type TrustedLockers = (); + type SovereignAccountOf = LocationToAccountId; + type MaxLockers = ConstU32<8>; + type WeightInfo = crate::weights::pallet_xcm::WeightInfo; + type AdminOrigin = EnsureRoot; + type MaxRemoteLockConsumers = ConstU32<0>; + type RemoteLockConsumerIdentifier = (); + type AuthorizedAliasConsideration = 
HoldConsideration< + AccountId, + Balances, + AuthorizeAliasHoldReason, + LinearStoragePrice, + >; +} + +impl cumulus_pallet_xcm::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type XcmExecutor = XcmExecutor; +} + +/// Simple conversion of `u32` into an `AssetId` for use in benchmarking. +pub struct XcmBenchmarkHelper; +#[cfg(feature = "runtime-benchmarks")] +impl pallet_assets::BenchmarkHelper for XcmBenchmarkHelper { + fn create_asset_id_parameter(id: u32) -> xcm::v5::Location { + xcm::v5::Location::new(1, [xcm::v5::Junction::Parachain(id)]) + } +} + +/// All configuration related to bridging +pub mod bridging { + use super::*; + use alloc::collections::btree_set::BTreeSet; + use assets_common::matching; + + parameter_types! { + /// Base price of every byte of the Westend -> Rococo message. Can be adjusted via + /// governance `set_storage` call. + /// + /// Default value is our estimation of the: + /// + /// 1) an approximate cost of XCM execution (`ExportMessage` and surroundings) at Westend bridge hub; + /// + /// 2) the approximate cost of Westend -> Rococo message delivery transaction on Rococo Bridge Hub, + /// converted into WNDs using 1:1 conversion rate; + /// + /// 3) the approximate cost of Westend -> Rococo message confirmation transaction on Westend Bridge Hub. + pub storage XcmBridgeHubRouterBaseFee: Balance = + bp_bridge_hub_westend::BridgeHubWestendBaseXcmFeeInWnds::get() + .saturating_add(bp_bridge_hub_rococo::BridgeHubRococoBaseDeliveryFeeInRocs::get()) + .saturating_add(bp_bridge_hub_westend::BridgeHubWestendBaseConfirmationFeeInWnds::get()); + /// Price of every byte of the Westend -> Rococo message. Can be adjusted via + /// governance `set_storage` call. 
+ pub storage XcmBridgeHubRouterByteFee: Balance = TransactionByteFee::get(); + + pub SiblingBridgeHubParaId: u32 = bp_bridge_hub_westend::BRIDGE_HUB_WESTEND_PARACHAIN_ID; + pub SiblingBridgeHub: Location = Location::new(1, [Parachain(SiblingBridgeHubParaId::get())]); + /// Router expects payment with this `AssetId`. + /// (`AssetId` has to be aligned with `BridgeTable`) + pub XcmBridgeHubRouterFeeAssetId: AssetId = WestendLocation::get().into(); + + pub BridgeTable: alloc::vec::Vec = + alloc::vec::Vec::new().into_iter() + .chain(to_rococo::BridgeTable::get()) + .collect(); + } + + pub type NetworkExportTable = xcm_builder::NetworkExportTable; + + pub mod to_rococo { + use super::*; + + parameter_types! { + pub SiblingBridgeHubWithBridgeHubRococoInstance: Location = Location::new( + 1, + [ + Parachain(SiblingBridgeHubParaId::get()), + PalletInstance(bp_bridge_hub_westend::WITH_BRIDGE_WESTEND_TO_ROCOCO_MESSAGES_PALLET_INDEX) + ] + ); + + pub const RococoNetwork: NetworkId = NetworkId::ByGenesis(ROCOCO_GENESIS_HASH); + pub RococoEcosystem: Location = Location::new(2, [GlobalConsensus(RococoNetwork::get())]); + pub RocLocation: Location = Location::new(2, [GlobalConsensus(RococoNetwork::get())]); + pub AssetHubRococo: Location = Location::new(2, [ + GlobalConsensus(RococoNetwork::get()), + Parachain(bp_asset_hub_rococo::ASSET_HUB_ROCOCO_PARACHAIN_ID) + ]); + + /// Set up exporters configuration. + /// `Option` represents static "base fee" which is used for total delivery fee calculation. 
+ pub BridgeTable: alloc::vec::Vec = alloc::vec![ + NetworkExportTableItem::new( + RococoNetwork::get(), + Some(alloc::vec![ + AssetHubRococo::get().interior.split_global().expect("invalid configuration for AssetHubRococo").1, + ]), + SiblingBridgeHub::get(), + // base delivery fee to local `BridgeHub` + Some(( + XcmBridgeHubRouterFeeAssetId::get(), + XcmBridgeHubRouterBaseFee::get(), + ).into()) + ) + ]; + + /// Universal aliases + pub UniversalAliases: BTreeSet<(Location, Junction)> = BTreeSet::from_iter( + alloc::vec![ + (SiblingBridgeHubWithBridgeHubRococoInstance::get(), GlobalConsensus(RococoNetwork::get())) + ] + ); + } + + impl Contains<(Location, Junction)> for UniversalAliases { + fn contains(alias: &(Location, Junction)) -> bool { + UniversalAliases::get().contains(alias) + } + } + + /// Allow any asset native to the Rococo ecosystem if it comes from Rococo Asset Hub. + pub type RococoAssetFromAssetHubRococo = + matching::RemoteAssetFromLocation, AssetHubRococo>; + } + + pub mod to_ethereum { + use super::*; + use assets_common::matching::FromNetwork; + use sp_std::collections::btree_set::BTreeSet; + use testnet_parachains_constants::westend::snowbridge::{ + EthereumNetwork, INBOUND_QUEUE_PALLET_INDEX_V1, + }; + + parameter_types! { + /// User fee for ERC20 token transfer back to Ethereum. + /// (initially was calculated by test `OutboundQueue::calculate_fees` - ETH/WND 1/400 and fee_per_gas 20 GWEI = 2200698000000 + *25%) + /// Needs to be more than fee calculated from DefaultFeeConfig FeeConfigRecord in snowbridge:parachain/pallets/outbound-queue/src/lib.rs + /// Polkadot uses 10 decimals, Kusama,Rococo,Westend 12 decimals. 
+ pub const DefaultBridgeHubEthereumBaseFee: Balance = 2_750_872_500_000; + pub storage BridgeHubEthereumBaseFee: Balance = DefaultBridgeHubEthereumBaseFee::get(); + pub SiblingBridgeHubWithEthereumInboundQueueInstance: Location = Location::new( + 1, + [ + Parachain(SiblingBridgeHubParaId::get()), + PalletInstance(INBOUND_QUEUE_PALLET_INDEX_V1) + ] + ); + + /// Set up exporters configuration. + /// `Option` represents static "base fee" which is used for total delivery fee calculation. + pub BridgeTable: sp_std::vec::Vec = sp_std::vec![ + NetworkExportTableItem::new( + EthereumNetwork::get().into(), + Some(sp_std::vec![Junctions::Here]), + SiblingBridgeHub::get(), + Some(( + XcmBridgeHubRouterFeeAssetId::get(), + BridgeHubEthereumBaseFee::get(), + ).into()) + ), + ]; + + /// Universal aliases + pub UniversalAliases: BTreeSet<(Location, Junction)> = BTreeSet::from_iter( + sp_std::vec![ + (SiblingBridgeHubWithEthereumInboundQueueInstance::get(), GlobalConsensus(EthereumNetwork::get().into())), + ] + ); + + pub EthereumBridgeTable: sp_std::vec::Vec = sp_std::vec::Vec::new().into_iter() + .chain(BridgeTable::get()) + .collect(); + } + + pub type EthereumNetworkExportTable = xcm_builder::NetworkExportTable; + + pub type EthereumAssetFromEthereum = + IsForeignConcreteAsset>; + + impl Contains<(Location, Junction)> for UniversalAliases { + fn contains(alias: &(Location, Junction)) -> bool { + UniversalAliases::get().contains(alias) + } + } + } + + /// Benchmarks helper for bridging configuration. 
+ #[cfg(feature = "runtime-benchmarks")] + pub struct BridgingBenchmarksHelper; + + #[cfg(feature = "runtime-benchmarks")] + impl BridgingBenchmarksHelper { + pub fn prepare_universal_alias() -> Option<(Location, Junction)> { + let alias = + to_rococo::UniversalAliases::get().into_iter().find_map(|(location, junction)| { + match to_rococo::SiblingBridgeHubWithBridgeHubRococoInstance::get() + .eq(&location) + { + true => Some((location, junction)), + false => None, + } + }); + Some(alias.expect("we expect here BridgeHubWestend to Rococo mapping at least")) + } + } +} diff --git a/substrate/frame/staking-async/runtimes/parachain/zombienet-staking-runtimes.toml b/substrate/frame/staking-async/runtimes/parachain/zombienet-staking-runtimes.toml new file mode 100644 index 0000000000000..8cf6c5946dcf4 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/parachain/zombienet-staking-runtimes.toml @@ -0,0 +1,27 @@ +[relaychain] +default_command = "polkadot" +chain_spec_path = "./rc.json" + +[[relaychain.nodes]] +name = "alice" +validator = true +rpc_port = 9944 + +[[relaychain.nodes]] +name = "bob" +validator = true +rpc_port = 9955 +args = [ + "-lruntime::system=debug,runtime::session=trace,runtime::staking::ah-client=trace", +] + +[[parachains]] +id = 1100 +chain_spec_path = "./parachain.json" + +[parachains.collator] +name = "charlie" +rpc_port = 9966 +args = [ + "-lruntime::system=debug,runtime::multiblock-election=debug,runtime::staking=debug,runtime::staking::rc-client=trace", +] diff --git a/substrate/frame/staking-async/runtimes/rc/Cargo.toml b/substrate/frame/staking-async/runtimes/rc/Cargo.toml new file mode 100644 index 0000000000000..53050bfd22ed8 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/Cargo.toml @@ -0,0 +1,371 @@ +[package] +name = "pallet-staking-async-rc-runtime" +build = "build.rs" +version = "7.0.0" +description = "Staking Async testnet Relay Chain runtime." 
+authors.workspace = true +edition.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +bitvec = { features = ["alloc"], workspace = true } +codec = { features = ["derive", "max-encoded-len"], workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } +serde = { workspace = true } +serde_derive = { optional = true, workspace = true } +serde_json = { features = ["alloc"], workspace = true } +smallvec = { workspace = true, default-features = true } + +binary-merkle-tree = { workspace = true } +sp-api = { workspace = true } +sp-application-crypto = { workspace = true } +sp-arithmetic = { workspace = true } +sp-authority-discovery = { workspace = true } +sp-block-builder = { workspace = true } +sp-consensus-babe = { workspace = true } +sp-consensus-beefy = { workspace = true } +sp-consensus-grandpa = { workspace = true } +sp-core = { workspace = true } +sp-genesis-builder = { workspace = true } +sp-inherents = { workspace = true } +sp-io = { workspace = true } +sp-keyring = { workspace = true } +sp-mmr-primitives = { workspace = true } +sp-npos-elections = { workspace = true } +sp-offchain = { workspace = true } +sp-runtime = { workspace = true } +sp-session = { workspace = true } +sp-staking = { workspace = true } +sp-storage = { workspace = true } +sp-transaction-pool = { workspace = true } +sp-version = { workspace = true } + +frame-election-provider-support = { workspace = true } +frame-executive = { workspace = true } +frame-metadata-hash-extension = { workspace = true } +frame-support = { features = ["experimental", "tuples-96"], workspace = true } +frame-system = { workspace = true } +frame-system-rpc-runtime-api = { workspace = true } +pallet-asset-rate = { workspace = true } +pallet-authority-discovery = { workspace = true } +pallet-authorship = { workspace = true } +pallet-babe = { workspace = true } +pallet-bags-list = { 
workspace = true } +pallet-balances = { workspace = true } +pallet-beefy = { workspace = true } +pallet-beefy-mmr = { workspace = true } +pallet-conviction-voting = { workspace = true } +pallet-delegated-staking = { workspace = true } +pallet-election-provider-multi-phase = { workspace = true } +pallet-elections-phragmen = { workspace = true } +pallet-fast-unstake = { workspace = true } +pallet-grandpa = { workspace = true } +pallet-identity = { workspace = true } +pallet-indices = { workspace = true } +pallet-membership = { workspace = true } +pallet-message-queue = { workspace = true } +pallet-migrations = { workspace = true } +pallet-mmr = { workspace = true } +pallet-multisig = { workspace = true } +pallet-nomination-pools = { workspace = true } +pallet-nomination-pools-runtime-api = { workspace = true } +pallet-offences = { workspace = true } +pallet-parameters = { workspace = true } +pallet-preimage = { workspace = true } +pallet-proxy = { workspace = true } +pallet-recovery = { workspace = true } +pallet-referenda = { workspace = true } +pallet-root-testing = { workspace = true } +pallet-scheduler = { workspace = true } +pallet-session = { workspace = true } +pallet-society = { workspace = true } +pallet-staking = { workspace = true } +pallet-staking-async-ah-client = { workspace = true } +pallet-staking-async-rc-client = { workspace = true } +pallet-staking-async-rc-runtime-constants = { workspace = true } +pallet-state-trie-migration = { workspace = true } +pallet-sudo = { workspace = true } +pallet-timestamp = { workspace = true } +pallet-transaction-payment = { workspace = true } +pallet-transaction-payment-rpc-runtime-api = { workspace = true } +pallet-treasury = { workspace = true } +pallet-utility = { workspace = true } +pallet-vesting = { workspace = true } +pallet-whitelist = { workspace = true } +pallet-xcm = { workspace = true } +pallet-xcm-benchmarks = { optional = true, workspace = true } + +frame-benchmarking = { optional = true, workspace = 
true } +frame-system-benchmarking = { optional = true, workspace = true } +frame-try-runtime = { optional = true, workspace = true } +hex-literal = { workspace = true, default-features = true } +pallet-election-provider-support-benchmarking = { optional = true, workspace = true } +pallet-nomination-pools-benchmarking = { optional = true, workspace = true } +pallet-offences-benchmarking = { optional = true, workspace = true } +pallet-session-benchmarking = { optional = true, workspace = true } + +polkadot-parachain-primitives = { workspace = true } +polkadot-primitives = { workspace = true } +polkadot-runtime-common = { workspace = true } +polkadot-runtime-parachains = { workspace = true } + +xcm = { workspace = true } +xcm-builder = { workspace = true } +xcm-executor = { workspace = true } +xcm-runtime-apis = { workspace = true } + +[dev-dependencies] +approx = { workspace = true } +remote-externalities = { workspace = true, default-features = true } +serde_json = { workspace = true, default-features = true } +sp-keyring = { workspace = true, default-features = true } +sp-tracing = { workspace = true } +tiny-keccak = { features = ["keccak"], workspace = true } +tokio = { features = ["macros"], workspace = true, default-features = true } + +[build-dependencies] +substrate-wasm-builder = { workspace = true, default-features = true } + +[features] +default = ["std"] +no_std = [] +only-staking = [] +std = [ + "binary-merkle-tree/std", + "bitvec/std", + "codec/std", + "frame-benchmarking?/std", + "frame-election-provider-support/std", + "frame-executive/std", + "frame-metadata-hash-extension/std", + "frame-support/std", + "frame-system-benchmarking?/std", + "frame-system-rpc-runtime-api/std", + "frame-system/std", + "frame-try-runtime/std", + "log/std", + "pallet-asset-rate/std", + "pallet-authority-discovery/std", + "pallet-authorship/std", + "pallet-babe/std", + "pallet-bags-list/std", + "pallet-balances/std", + "pallet-beefy-mmr/std", + "pallet-beefy/std", + 
"pallet-conviction-voting/std", + "pallet-delegated-staking/std", + "pallet-election-provider-multi-phase/std", + "pallet-election-provider-support-benchmarking?/std", + "pallet-elections-phragmen/std", + "pallet-fast-unstake/std", + "pallet-grandpa/std", + "pallet-identity/std", + "pallet-indices/std", + "pallet-membership/std", + "pallet-message-queue/std", + "pallet-migrations/std", + "pallet-mmr/std", + "pallet-multisig/std", + "pallet-nomination-pools-benchmarking?/std", + "pallet-nomination-pools-runtime-api/std", + "pallet-nomination-pools/std", + "pallet-offences-benchmarking?/std", + "pallet-offences/std", + "pallet-parameters/std", + "pallet-preimage/std", + "pallet-proxy/std", + "pallet-recovery/std", + "pallet-referenda/std", + "pallet-root-testing/std", + "pallet-scheduler/std", + "pallet-session-benchmarking?/std", + "pallet-session/std", + "pallet-society/std", + "pallet-staking-async-ah-client/std", + "pallet-staking-async-rc-client/std", + "pallet-staking-async-rc-runtime-constants/std", + "pallet-staking/std", + "pallet-state-trie-migration/std", + "pallet-sudo/std", + "pallet-timestamp/std", + "pallet-transaction-payment-rpc-runtime-api/std", + "pallet-transaction-payment/std", + "pallet-treasury/std", + "pallet-utility/std", + "pallet-vesting/std", + "pallet-whitelist/std", + "pallet-xcm-benchmarks?/std", + "pallet-xcm/std", + "polkadot-parachain-primitives/std", + "polkadot-primitives/std", + "polkadot-runtime-common/std", + "polkadot-runtime-parachains/std", + "scale-info/std", + "serde/std", + "serde_derive", + "serde_json/std", + "sp-api/std", + "sp-application-crypto/std", + "sp-arithmetic/std", + "sp-authority-discovery/std", + "sp-block-builder/std", + "sp-consensus-babe/std", + "sp-consensus-beefy/std", + "sp-consensus-grandpa/std", + "sp-core/std", + "sp-genesis-builder/std", + "sp-inherents/std", + "sp-io/std", + "sp-mmr-primitives/std", + "sp-npos-elections/std", + "sp-offchain/std", + "sp-runtime/std", + "sp-session/std", + 
"sp-staking/std", + "sp-storage/std", + "sp-tracing/std", + "sp-transaction-pool/std", + "sp-version/std", + "xcm-builder/std", + "xcm-executor/std", + "xcm-runtime-apis/std", + "xcm/std", +] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-election-provider-support/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system-benchmarking/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-asset-rate/runtime-benchmarks", + "pallet-babe/runtime-benchmarks", + "pallet-bags-list/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-beefy-mmr/runtime-benchmarks", + "pallet-conviction-voting/runtime-benchmarks", + "pallet-delegated-staking/runtime-benchmarks", + "pallet-election-provider-multi-phase/runtime-benchmarks", + "pallet-election-provider-support-benchmarking/runtime-benchmarks", + "pallet-elections-phragmen/runtime-benchmarks", + "pallet-fast-unstake/runtime-benchmarks", + "pallet-grandpa/runtime-benchmarks", + "pallet-identity/runtime-benchmarks", + "pallet-indices/runtime-benchmarks", + "pallet-membership/runtime-benchmarks", + "pallet-message-queue/runtime-benchmarks", + "pallet-migrations/runtime-benchmarks", + "pallet-mmr/runtime-benchmarks", + "pallet-multisig/runtime-benchmarks", + "pallet-nomination-pools-benchmarking/runtime-benchmarks", + "pallet-nomination-pools/runtime-benchmarks", + "pallet-offences-benchmarking/runtime-benchmarks", + "pallet-offences/runtime-benchmarks", + "pallet-parameters/runtime-benchmarks", + "pallet-preimage/runtime-benchmarks", + "pallet-proxy/runtime-benchmarks", + "pallet-recovery/runtime-benchmarks", + "pallet-referenda/runtime-benchmarks", + "pallet-scheduler/runtime-benchmarks", + "pallet-session-benchmarking/runtime-benchmarks", + "pallet-society/runtime-benchmarks", + "pallet-staking-async-ah-client/runtime-benchmarks", + "pallet-staking-async-rc-client/runtime-benchmarks", + "pallet-staking/runtime-benchmarks", + 
"pallet-state-trie-migration/runtime-benchmarks", + "pallet-sudo/runtime-benchmarks", + "pallet-timestamp/runtime-benchmarks", + "pallet-transaction-payment/runtime-benchmarks", + "pallet-treasury/runtime-benchmarks", + "pallet-utility/runtime-benchmarks", + "pallet-vesting/runtime-benchmarks", + "pallet-whitelist/runtime-benchmarks", + "pallet-xcm-benchmarks/runtime-benchmarks", + "pallet-xcm/runtime-benchmarks", + "polkadot-parachain-primitives/runtime-benchmarks", + "polkadot-primitives/runtime-benchmarks", + "polkadot-runtime-common/runtime-benchmarks", + "polkadot-runtime-parachains/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", + "sp-staking/runtime-benchmarks", + "xcm-builder/runtime-benchmarks", + "xcm-executor/runtime-benchmarks", + "xcm-runtime-apis/runtime-benchmarks", + "xcm/runtime-benchmarks", +] +try-runtime = [ + "frame-election-provider-support/try-runtime", + "frame-executive/try-runtime", + "frame-support/try-runtime", + "frame-system/try-runtime", + "frame-try-runtime", + "frame-try-runtime/try-runtime", + "pallet-asset-rate/try-runtime", + "pallet-authority-discovery/try-runtime", + "pallet-authorship/try-runtime", + "pallet-babe/try-runtime", + "pallet-bags-list/try-runtime", + "pallet-balances/try-runtime", + "pallet-beefy-mmr/try-runtime", + "pallet-beefy/try-runtime", + "pallet-conviction-voting/try-runtime", + "pallet-delegated-staking/try-runtime", + "pallet-election-provider-multi-phase/try-runtime", + "pallet-elections-phragmen/try-runtime", + "pallet-fast-unstake/try-runtime", + "pallet-grandpa/try-runtime", + "pallet-identity/try-runtime", + "pallet-indices/try-runtime", + "pallet-membership/try-runtime", + "pallet-message-queue/try-runtime", + "pallet-migrations/try-runtime", + "pallet-mmr/try-runtime", + "pallet-multisig/try-runtime", + "pallet-nomination-pools/try-runtime", + "pallet-offences/try-runtime", + "pallet-parameters/try-runtime", + "pallet-preimage/try-runtime", + "pallet-proxy/try-runtime", + 
"pallet-recovery/try-runtime", + "pallet-referenda/try-runtime", + "pallet-root-testing/try-runtime", + "pallet-scheduler/try-runtime", + "pallet-session/try-runtime", + "pallet-society/try-runtime", + "pallet-staking-async-ah-client/try-runtime", + "pallet-staking-async-rc-client/try-runtime", + "pallet-staking/try-runtime", + "pallet-state-trie-migration/try-runtime", + "pallet-sudo/try-runtime", + "pallet-timestamp/try-runtime", + "pallet-transaction-payment/try-runtime", + "pallet-treasury/try-runtime", + "pallet-utility/try-runtime", + "pallet-vesting/try-runtime", + "pallet-whitelist/try-runtime", + "pallet-xcm/try-runtime", + "polkadot-runtime-common/try-runtime", + "polkadot-runtime-parachains/try-runtime", + "sp-runtime/try-runtime", +] + +# Enable the metadata hash generation in the wasm builder. +metadata-hash = ["substrate-wasm-builder/metadata-hash"] + +# Set timing constants (e.g. session period) to faster versions to speed up testing. +fast-runtime = [] + +runtime-metrics = [ + "polkadot-runtime-parachains/runtime-metrics", + "sp-io/with-tracing", +] + +# A feature that should be enabled when the runtime should be built for on-chain +# deployment. This will disable stuff that shouldn't be part of the on-chain wasm +# to make it smaller, like logging for example. +on-chain-release-build = ["metadata-hash"] diff --git a/substrate/frame/staking-async/runtimes/rc/build.rs b/substrate/frame/staking-async/runtimes/rc/build.rs new file mode 100644 index 0000000000000..a787c0c4f5323 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/build.rs @@ -0,0 +1,39 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +#[cfg(all(not(feature = "metadata-hash"), feature = "std"))] +fn main() { + substrate_wasm_builder::WasmBuilder::build_using_defaults(); + substrate_wasm_builder::WasmBuilder::init_with_defaults() + .set_file_name("fast_runtime_binary.rs") + .enable_feature("fast-runtime") + .build(); +} + +#[cfg(all(feature = "metadata-hash", feature = "std"))] +fn main() { + substrate_wasm_builder::WasmBuilder::init_with_defaults() + .enable_metadata_hash("WND", 12) + .build(); + substrate_wasm_builder::WasmBuilder::init_with_defaults() + .set_file_name("fast_runtime_binary.rs") + .enable_feature("fast-runtime") + .enable_metadata_hash("WND", 12) + .build(); +} + +#[cfg(not(feature = "std"))] +fn main() {} diff --git a/substrate/frame/staking-async/runtimes/rc/constants/Cargo.toml b/substrate/frame/staking-async/runtimes/rc/constants/Cargo.toml new file mode 100644 index 0000000000000..e865ba4e81fb5 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/constants/Cargo.toml @@ -0,0 +1,44 @@ +[package] +name = "pallet-staking-async-rc-runtime-constants" +version = "7.0.0" +description = "Constants used throughout the Staking Async RC network." 
+authors.workspace = true +edition.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[package.metadata.polkadot-sdk] +exclude-from-umbrella = true + +[lints] +workspace = true + +[dependencies] +smallvec = { workspace = true, default-features = true } + +frame-support = { workspace = true } +polkadot-primitives = { workspace = true } +polkadot-runtime-common = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } +sp-weights = { workspace = true } + +xcm = { workspace = true } +xcm-builder = { workspace = true } + +[features] +default = ["std"] +std = [ + "frame-support/std", + "polkadot-primitives/std", + "polkadot-runtime-common/std", + "sp-core/std", + "sp-runtime/std", + "sp-weights/std", + "xcm-builder/std", + "xcm/std", +] + +# Set timing constants (e.g. session period) to faster versions to speed up testing. +fast-runtime = [] diff --git a/substrate/frame/staking-async/runtimes/rc/constants/src/lib.rs b/substrate/frame/staking-async/runtimes/rc/constants/src/lib.rs new file mode 100644 index 0000000000000..7a2663677e9e4 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/constants/src/lib.rs @@ -0,0 +1,178 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . 
+ +#![cfg_attr(not(feature = "std"), no_std)] + +pub mod weights; + +/// Money matters. +pub mod currency { + use polkadot_primitives::Balance; + + /// The existential deposit. + pub const EXISTENTIAL_DEPOSIT: Balance = 1 * CENTS; + + pub const UNITS: Balance = 1_000_000_000_000; + pub const CENTS: Balance = UNITS / 100; + pub const MILLICENTS: Balance = CENTS / 1_000; + pub const GRAND: Balance = CENTS * 100_000; + + pub const fn deposit(items: u32, bytes: u32) -> Balance { + items as Balance * 100 * CENTS + (bytes as Balance) * 5 * MILLICENTS + } +} + +/// Time and blocks. +pub mod time { + use polkadot_primitives::{BlockNumber, Moment}; + use polkadot_runtime_common::prod_or_fast; + + pub const MILLISECS_PER_BLOCK: Moment = 6000; + pub const SLOT_DURATION: Moment = MILLISECS_PER_BLOCK; + pub const EPOCH_DURATION_IN_SLOTS: BlockNumber = prod_or_fast!(1 * HOURS, 1 * MINUTES); + + // These time units are defined in number of blocks. + pub const MINUTES: BlockNumber = 60_000 / (MILLISECS_PER_BLOCK as BlockNumber); + pub const HOURS: BlockNumber = MINUTES * 60; + pub const DAYS: BlockNumber = HOURS * 24; + + // 1 in 4 blocks (on average, not counting collisions) will be primary babe blocks. + // The choice of is done in accordance to the slot duration and expected target + // block time, for safely resisting network delays of maximum two seconds. + // + pub const PRIMARY_PROBABILITY: (u64, u64) = (1, 4); +} + +/// Fee-related. +pub mod fee { + use crate::weights::ExtrinsicBaseWeight; + use frame_support::weights::{ + WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial, + }; + use polkadot_primitives::Balance; + use smallvec::smallvec; + pub use sp_runtime::Perbill; + + /// The block saturation level. Fees will be updates based on this value. + pub const TARGET_BLOCK_FULLNESS: Perbill = Perbill::from_percent(25); + + /// Handles converting a weight scalar to a fee value, based on the scale and granularity of the + /// node's balance type. 
+ /// + /// This should typically create a mapping between the following ranges: + /// - [0,` MAXIMUM_BLOCK_WEIGHT`] + /// - [Balance::min, Balance::max] + /// + /// Yet, it can be used for any other sort of change to weight-fee. Some examples being: + /// - Setting it to `0` will essentially disable the weight fee. + /// - Setting it to `1` will cause the literal `#[weight = x]` values to be charged. + pub struct WeightToFee; + impl WeightToFeePolynomial for WeightToFee { + type Balance = Balance; + fn polynomial() -> WeightToFeeCoefficients { + // in Westend, extrinsic base weight (smallest non-zero weight) is mapped to 1/10 CENT: + let p = super::currency::CENTS; + let q = 10 * Balance::from(ExtrinsicBaseWeight::get().ref_time()); + smallvec![WeightToFeeCoefficient { + degree: 1, + negative: false, + coeff_frac: Perbill::from_rational(p % q, q), + coeff_integer: p / q, + }] + } + } +} + +/// System Parachains. +pub mod system_parachain { + use polkadot_primitives::Id; + use xcm_builder::IsChildSystemParachain; + + /// Network's Asset Hub parachain ID. + pub const ASSET_HUB_ID: u32 = 1000; + /// Collectives parachain ID. + pub const COLLECTIVES_ID: u32 = 1001; + /// BridgeHub parachain ID. + pub const BRIDGE_HUB_ID: u32 = 1002; + /// Encointer parachain ID. + pub const ENCOINTER_ID: u32 = 1003; + /// People Chain parachain ID. + pub const PEOPLE_ID: u32 = 1004; + /// Brokerage parachain ID. + pub const BROKER_ID: u32 = 1005; + + /// All system parachains of Westend. + pub type SystemParachains = IsChildSystemParachain; + + /// Coretime constants + pub mod coretime { + /// Coretime timeslice period in blocks + /// WARNING: This constant is used accross chains, so additional care should be taken + /// when changing it. + #[cfg(feature = "fast-runtime")] + pub const TIMESLICE_PERIOD: u32 = 20; + #[cfg(not(feature = "fast-runtime"))] + pub const TIMESLICE_PERIOD: u32 = 80; + } +} + +/// Westend Treasury pallet instance. 
+pub const TREASURY_PALLET_ID: u8 = 37; + +/// XCM protocol related constants. +pub mod xcm { + /// Pluralistic bodies existing within the consensus. + pub mod body { + // Preallocated for the Root body. + #[allow(dead_code)] + const ROOT_INDEX: u32 = 0; + // The bodies corresponding to the Polkadot OpenGov Origins. + pub const FELLOWSHIP_ADMIN_INDEX: u32 = 1; + #[deprecated = "Will be removed after August 2024; Use `xcm::latest::BodyId::Treasury` \ + instead"] + pub const TREASURER_INDEX: u32 = 2; + } +} + +#[cfg(test)] +mod tests { + use super::{ + currency::{CENTS, MILLICENTS, UNITS}, + fee::WeightToFee, + }; + use crate::weights::ExtrinsicBaseWeight; + use frame_support::weights::WeightToFee as WeightToFeeT; + use polkadot_runtime_common::MAXIMUM_BLOCK_WEIGHT; + + #[test] + // Test that the fee for `MAXIMUM_BLOCK_WEIGHT` of weight has sane bounds. + fn full_block_fee_is_correct() { + // A full block should cost between 10 and 100 UNITS. + let full_block = WeightToFee::weight_to_fee(&MAXIMUM_BLOCK_WEIGHT); + assert!(full_block >= 10 * UNITS); + assert!(full_block <= 100 * UNITS); + } + + #[test] + // This function tests that the fee for `ExtrinsicBaseWeight` of weight is correct + fn extrinsic_base_fee_is_correct() { + // `ExtrinsicBaseWeight` should cost 1/10 of a CENT + println!("Base: {}", ExtrinsicBaseWeight::get()); + let x = WeightToFee::weight_to_fee(&ExtrinsicBaseWeight::get()); + let y = CENTS / 10; + assert!(x.max(y) - x.min(y) < MILLICENTS); + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/constants/src/weights/block_weights.rs b/substrate/frame/staking-async/runtimes/rc/constants/src/weights/block_weights.rs new file mode 100644 index 0000000000000..a806649f19230 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/constants/src/weights/block_weights.rs @@ -0,0 +1,81 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. 
+ +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-06-14 (Y/M/D) +//! HOSTNAME: `runner--ss9ysm1-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! +//! SHORT-NAME: `block`, LONG-NAME: `BlockExecution`, RUNTIME: `Development` +//! WARMUPS: `10`, REPEAT: `100` +//! WEIGHT-PATH: `runtime/westend/constants/src/weights/` +//! WEIGHT-METRIC: `Average`, WEIGHT-MUL: `1.0`, WEIGHT-ADD: `0` + +// Executed Command: +// ./target/production/polkadot +// benchmark +// overhead +// --chain=westend-dev +// --execution=wasm +// --wasm-execution=compiled +// --weight-path=runtime/westend/constants/src/weights/ +// --warmup=10 +// --repeat=100 +// --header=./file_header.txt + +use sp_core::parameter_types; +use sp_weights::{constants::WEIGHT_REF_TIME_PER_NANOS, Weight}; + +parameter_types! { + /// Time to execute an empty block. + /// Calculated by multiplying the *Average* with `1.0` and adding `0`. 
+ /// + /// Stats nanoseconds: + /// Min, Max: 449_093, 498_211 + /// Average: 461_988 + /// Median: 459_070 + /// Std-Dev: 10124.58 + /// + /// Percentiles nanoseconds: + /// 99th: 493_580 + /// 95th: 482_929 + /// 75th: 464_502 + pub const BlockExecutionWeight: Weight = + Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(461_988), 0); +} + +#[cfg(test)] +mod test_weights { + use sp_weights::constants; + + /// Checks that the weight exists and is sane. + // NOTE: If this test fails but you are sure that the generated values are fine, + // you can delete it. + #[test] + fn sane() { + let w = super::BlockExecutionWeight::get(); + + // At least 100 µs. + assert!( + w.ref_time() >= 100u64 * constants::WEIGHT_REF_TIME_PER_MICROS, + "Weight should be at least 100 µs." + ); + // At most 50 ms. + assert!( + w.ref_time() <= 50u64 * constants::WEIGHT_REF_TIME_PER_MILLIS, + "Weight should be at most 50 ms." + ); + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/constants/src/weights/extrinsic_weights.rs b/substrate/frame/staking-async/runtimes/rc/constants/src/weights/extrinsic_weights.rs new file mode 100644 index 0000000000000..a0fdc1bf6db7a --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/constants/src/weights/extrinsic_weights.rs @@ -0,0 +1,81 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . 
+ +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-06-14 (Y/M/D) +//! HOSTNAME: `runner--ss9ysm1-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! +//! SHORT-NAME: `extrinsic`, LONG-NAME: `ExtrinsicBase`, RUNTIME: `Development` +//! WARMUPS: `10`, REPEAT: `100` +//! WEIGHT-PATH: `runtime/westend/constants/src/weights/` +//! WEIGHT-METRIC: `Average`, WEIGHT-MUL: `1.0`, WEIGHT-ADD: `0` + +// Executed Command: +// ./target/production/polkadot +// benchmark +// overhead +// --chain=westend-dev +// --execution=wasm +// --wasm-execution=compiled +// --weight-path=runtime/westend/constants/src/weights/ +// --warmup=10 +// --repeat=100 +// --header=./file_header.txt + +use sp_core::parameter_types; +use sp_weights::{constants::WEIGHT_REF_TIME_PER_NANOS, Weight}; + +parameter_types! { + /// Time to execute a NO-OP extrinsic, for example `System::remark`. + /// Calculated by multiplying the *Average* with `1.0` and adding `0`. + /// + /// Stats nanoseconds: + /// Min, Max: 112_202, 116_271 + /// Average: 113_632 + /// Median: 113_689 + /// Std-Dev: 576.31 + /// + /// Percentiles nanoseconds: + /// 99th: 114_688 + /// 95th: 114_367 + /// 75th: 113_969 + pub const ExtrinsicBaseWeight: Weight = + Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(113_632), 0); +} + +#[cfg(test)] +mod test_weights { + use sp_weights::constants; + + /// Checks that the weight exists and is sane. + // NOTE: If this test fails but you are sure that the generated values are fine, + // you can delete it. + #[test] + fn sane() { + let w = super::ExtrinsicBaseWeight::get(); + + // At least 10 µs. + assert!( + w.ref_time() >= 10u64 * constants::WEIGHT_REF_TIME_PER_MICROS, + "Weight should be at least 10 µs." + ); + // At most 1 ms. + assert!( + w.ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, + "Weight should be at most 1 ms." 
+ ); + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/constants/src/weights/mod.rs b/substrate/frame/staking-async/runtimes/rc/constants/src/weights/mod.rs new file mode 100644 index 0000000000000..23812ce7ed052 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/constants/src/weights/mod.rs @@ -0,0 +1,28 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Expose the auto generated weight files. + +pub mod block_weights; +pub mod extrinsic_weights; +pub mod paritydb_weights; +pub mod rocksdb_weights; + +pub use block_weights::BlockExecutionWeight; +pub use extrinsic_weights::ExtrinsicBaseWeight; +pub use paritydb_weights::constants::ParityDbWeight; +pub use rocksdb_weights::constants::RocksDbWeight; diff --git a/substrate/frame/staking-async/runtimes/rc/constants/src/weights/paritydb_weights.rs b/substrate/frame/staking-async/runtimes/rc/constants/src/weights/paritydb_weights.rs new file mode 100644 index 0000000000000..25679703831a1 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/constants/src/weights/paritydb_weights.rs @@ -0,0 +1,63 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod constants { + use frame_support::{ + parameter_types, + weights::{constants, RuntimeDbWeight}, + }; + + parameter_types! { + /// `ParityDB` can be enabled with a feature flag, but is still experimental. These weights + /// are available for brave runtime engineers who may want to try this out as default. + pub const ParityDbWeight: RuntimeDbWeight = RuntimeDbWeight { + read: 8_000 * constants::WEIGHT_REF_TIME_PER_NANOS, + write: 50_000 * constants::WEIGHT_REF_TIME_PER_NANOS, + }; + } + + #[cfg(test)] + mod test_db_weights { + use super::constants::ParityDbWeight as W; + use frame_support::weights::constants; + + /// Checks that all weights exist and have sane values. + // NOTE: If this test fails but you are sure that the generated values are fine, + // you can delete it. + #[test] + fn sane() { + // At least 1 µs. + assert!( + W::get().reads(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, + "Read weight should be at least 1 µs." + ); + assert!( + W::get().writes(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, + "Write weight should be at least 1 µs." + ); + // At most 1 ms. + assert!( + W::get().reads(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, + "Read weight should be at most 1 ms." + ); + assert!( + W::get().writes(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, + "Write weight should be at most 1 ms." 
+ ); + } + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/constants/src/weights/rocksdb_weights.rs b/substrate/frame/staking-async/runtimes/rc/constants/src/weights/rocksdb_weights.rs new file mode 100644 index 0000000000000..3dd817aa6f137 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/constants/src/weights/rocksdb_weights.rs @@ -0,0 +1,63 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod constants { + use frame_support::{ + parameter_types, + weights::{constants, RuntimeDbWeight}, + }; + + parameter_types! { + /// By default, Substrate uses `RocksDB`, so this will be the weight used throughout + /// the runtime. + pub const RocksDbWeight: RuntimeDbWeight = RuntimeDbWeight { + read: 25_000 * constants::WEIGHT_REF_TIME_PER_NANOS, + write: 100_000 * constants::WEIGHT_REF_TIME_PER_NANOS, + }; + } + + #[cfg(test)] + mod test_db_weights { + use super::constants::RocksDbWeight as W; + use frame_support::weights::constants; + + /// Checks that all weights exist and have sane values. + // NOTE: If this test fails but you are sure that the generated values are fine, + // you can delete it. + #[test] + fn sane() { + // At least 1 µs. + assert!( + W::get().reads(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, + "Read weight should be at least 1 µs." 
+ ); + assert!( + W::get().writes(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, + "Write weight should be at least 1 µs." + ); + // At most 1 ms. + assert!( + W::get().reads(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, + "Read weight should be at most 1 ms." + ); + assert!( + W::get().writes(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, + "Write weight should be at most 1 ms." + ); + } + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/genesis_config_presets.rs b/substrate/frame/staking-async/runtimes/rc/src/genesis_config_presets.rs new file mode 100644 index 0000000000000..7140d53d29359 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/genesis_config_presets.rs @@ -0,0 +1,407 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! 
Genesis configs presets for the Westend runtime + +use crate::{ + BabeConfig, BalancesConfig, ConfigurationConfig, RegistrarConfig, RuntimeGenesisConfig, + SessionConfig, SessionKeys, StakingAsyncAhClientConfig, SudoConfig, BABE_GENESIS_EPOCH_CONFIG, +}; +#[cfg(not(feature = "std"))] +use alloc::format; +use alloc::{vec, vec::Vec}; +use frame_support::build_struct_json_patch; +use pallet_staking_async_rc_runtime_constants::currency::UNITS as WND; +use polkadot_primitives::{AccountId, AssignmentId, SchedulerParams, ValidatorId}; +use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; +use sp_consensus_babe::AuthorityId as BabeId; +use sp_consensus_beefy::ecdsa_crypto::AuthorityId as BeefyId; +use sp_consensus_grandpa::AuthorityId as GrandpaId; +use sp_core::{crypto::get_public_from_string_or_panic, sr25519}; +use sp_genesis_builder::PresetId; +use sp_keyring::Sr25519Keyring; + +/// Helper function to generate stash, controller and session key from seed +fn get_authority_keys_from_seed( + seed: &str, +) -> ( + AccountId, + AccountId, + BabeId, + GrandpaId, + ValidatorId, + AssignmentId, + AuthorityDiscoveryId, + BeefyId, +) { + let keys = get_authority_keys_from_seed_no_beefy(seed); + ( + keys.0, + keys.1, + keys.2, + keys.3, + keys.4, + keys.5, + keys.6, + get_public_from_string_or_panic::(seed), + ) +} + +/// Helper function to generate stash, controller and session key from seed +fn get_authority_keys_from_seed_no_beefy( + seed: &str, +) -> (AccountId, AccountId, BabeId, GrandpaId, ValidatorId, AssignmentId, AuthorityDiscoveryId) { + ( + get_public_from_string_or_panic::(&format!("{}//stash", seed)).into(), + get_public_from_string_or_panic::(seed).into(), + get_public_from_string_or_panic::(seed), + get_public_from_string_or_panic::(seed), + get_public_from_string_or_panic::(seed), + get_public_from_string_or_panic::(seed), + get_public_from_string_or_panic::(seed), + ) +} + +fn testnet_accounts() -> Vec { + Sr25519Keyring::well_known().map(|k| 
k.to_account_id()).collect() +} + +fn westend_session_keys( + babe: BabeId, + grandpa: GrandpaId, + para_validator: ValidatorId, + para_assignment: AssignmentId, + authority_discovery: AuthorityDiscoveryId, + beefy: BeefyId, +) -> SessionKeys { + SessionKeys { babe, grandpa, para_validator, para_assignment, authority_discovery, beefy } +} + +fn default_parachains_host_configuration( +) -> polkadot_runtime_parachains::configuration::HostConfiguration +{ + use polkadot_primitives::{ + node_features::FeatureIndex, ApprovalVotingParams, AsyncBackingParams, MAX_CODE_SIZE, + MAX_POV_SIZE, + }; + + polkadot_runtime_parachains::configuration::HostConfiguration { + validation_upgrade_cooldown: 2u32, + validation_upgrade_delay: 2, + code_retention_period: 1200, + max_code_size: MAX_CODE_SIZE, + max_pov_size: MAX_POV_SIZE, + max_head_data_size: 32 * 1024, + max_upward_queue_count: 8, + max_upward_queue_size: 1024 * 1024, + max_downward_message_size: 1024 * 1024, + max_upward_message_size: 50 * 1024, + max_upward_message_num_per_candidate: 5, + hrmp_sender_deposit: 0, + hrmp_recipient_deposit: 0, + hrmp_channel_max_capacity: 8, + hrmp_channel_max_total_size: 8 * 1024, + hrmp_max_parachain_inbound_channels: 4, + hrmp_channel_max_message_size: 1024 * 1024, + hrmp_max_parachain_outbound_channels: 4, + hrmp_max_message_num_per_candidate: 5, + dispute_period: 6, + no_show_slots: 2, + n_delay_tranches: 25, + needed_approvals: 2, + relay_vrf_modulo_samples: 2, + zeroth_delay_tranche_width: 0, + minimum_validation_upgrade_delay: 5, + async_backing_params: AsyncBackingParams { + max_candidate_depth: 0, + allowed_ancestry_len: 0, + }, + node_features: bitvec::vec::BitVec::from_element( + 1u8 << (FeatureIndex::ElasticScalingMVP as usize) | + 1u8 << (FeatureIndex::EnableAssignmentsV2 as usize) | + 1u8 << (FeatureIndex::CandidateReceiptV2 as usize), + ), + scheduler_params: SchedulerParams { + lookahead: 3, + group_rotation_frequency: 20, + paras_availability_period: 4, + 
..Default::default() + }, + approval_voting_params: ApprovalVotingParams { max_approval_coalesce_count: 5 }, + ..Default::default() + } +} + +#[test] +fn default_parachains_host_configuration_is_consistent() { + default_parachains_host_configuration().panic_if_not_consistent(); +} + +/// Helper function to create westend runtime `GenesisConfig` patch for testing +fn westend_testnet_genesis( + initial_authorities: Vec<( + AccountId, + AccountId, + BabeId, + GrandpaId, + ValidatorId, + AssignmentId, + AuthorityDiscoveryId, + BeefyId, + )>, + root_key: AccountId, + endowed_accounts: Option>, +) -> serde_json::Value { + let endowed_accounts: Vec = endowed_accounts.unwrap_or_else(testnet_accounts); + + const ENDOWMENT: u128 = 1_000_000 * WND; + + build_struct_json_patch!(RuntimeGenesisConfig { + balances: BalancesConfig { + balances: endowed_accounts.iter().map(|k| (k.clone(), ENDOWMENT)).collect::>(), + }, + session: SessionConfig { + keys: initial_authorities + .iter() + .map(|x| { + ( + x.0.clone(), + x.0.clone(), + westend_session_keys( + x.2.clone(), + x.3.clone(), + x.4.clone(), + x.5.clone(), + x.6.clone(), + x.7.clone(), + ), + ) + }) + .collect::>(), + }, + babe: BabeConfig { epoch_config: BABE_GENESIS_EPOCH_CONFIG }, + sudo: SudoConfig { key: Some(root_key) }, + configuration: ConfigurationConfig { config: default_parachains_host_configuration() }, + registrar: RegistrarConfig { next_free_para_id: polkadot_primitives::LOWEST_PUBLIC_ID }, + staking_async_ah_client: StakingAsyncAhClientConfig { + operating_mode: pallet_staking_async_ah_client::OperatingMode::Active, + ..Default::default() + } + }) +} + +// staging_testnet +fn westend_staging_testnet_config_genesis() -> serde_json::Value { + use hex_literal::hex; + use sp_core::crypto::UncheckedInto; + + // Following keys are used in genesis config for development chains. + // DO NOT use them in production chains as the secret seed is public. 
+ // + // SECRET_SEED="slow awkward present example safe bundle science ocean cradle word tennis earn" + // subkey inspect -n polkadot "$SECRET_SEED" + let endowed_accounts: Vec = vec![ + // 15S75FkhCWEowEGfxWwVfrW3LQuy8w8PNhVmrzfsVhCMjUh1 + hex!["c416837e232d9603e83162ef4bda08e61580eeefe60fe92fc044aa508559ae42"].into(), + ]; + // SECRET=$SECRET_SEED ./scripts/prepare-test-net.sh 4 + let initial_authorities: Vec<( + AccountId, + AccountId, + BabeId, + GrandpaId, + ValidatorId, + AssignmentId, + AuthorityDiscoveryId, + BeefyId, + )> = Vec::from([ + ( + //5EvydUTtHvt39Khac3mMxNPgzcfu49uPDzUs3TL7KEzyrwbw + hex!["7ecfd50629cdd246649959d88d490b31508db511487e111a52a392e6e458f518"].into(), + //5HQyX5gyy77m9QLXguAhiwjTArHYjYspeY98dYDu1JDetfZg + hex!["eca2cca09bdc66a7e6d8c3d9499a0be2ad4690061be8a9834972e17d13d2fe7e"].into(), + //5G13qYRudTyttwTJvHvnwp8StFtcfigyPnwfD4v7LNopsnX4 + hex!["ae27367cb77850fb195fe1f9c60b73210409e68c5ad953088070f7d8513d464c"] + .unchecked_into(), + //5Eb7wM65PNgtY6e33FEAzYtU5cRTXt6WQvZTnzaKQwkVcABk + hex!["6faae44b21c6f2681a7f60df708e9f79d340f7d441d28bd987fab8d05c6487e8"] + .unchecked_into(), + //5FqMLAgygdX9UqzukDp15Uid9PAKdFAR621U7xtp5ut2NfrW + hex!["a6c1a5b501985a83cb1c37630c5b41e6b0a15b3675b2fd94694758e6cfa6794d"] + .unchecked_into(), + //5DhXAV75BKvF9o447ikWqLttyL2wHtLMFSX7GrsKF9Ny61Ta + hex!["485051748ab9c15732f19f3fbcf1fd00a6d9709635f084505107fbb059c33d2f"] + .unchecked_into(), + //5GNHfmrtWLTawnGCmc39rjAEiW97vKvE7DGePYe4am5JtE4i + hex!["be59ed75a72f7b47221ce081ba4262cf2e1ea7867e30e0b3781822f942b97677"] + .unchecked_into(), + //5DA6Z8RUF626stn94aTRBCeobDCYcFbU7Pdk4Tz1R9vA8B8F + hex!["0207e43990799e1d02b0507451e342a1240ff836ea769c57297589a5fd072ad8f4"] + .unchecked_into(), + ), + ( + //5DFpvDUdCgw54E3E357GR1PyJe3Ft9s7Qyp7wbELAoJH9RQa + hex!["34b7b3efd35fcc3c1926ca065381682b1af29b57dabbcd091042c6de1d541b7d"].into(), + //5DZSSsND5wCjngvyXv27qvF3yPzt3MCU8rWnqNy4imqZmjT8 + 
hex!["4226796fa792ac78875e023ff2e30e3c2cf79f0b7b3431254cd0f14a3007bc0e"].into(), + //5CPrgfRNDQvQSnLRdeCphP3ibj5PJW9ESbqj2fw29vBMNQNn + hex!["0e9b60f04be3bffe362eb2212ea99d2b909b052f4bff7c714e13c2416a797f5d"] + .unchecked_into(), + //5FXFsPReTUEYPRNKhbTdUathcWBsxTNsLbk2mTpYdKCJewjA + hex!["98f4d81cb383898c2c3d54dab28698c0f717c81b509cb32dc6905af3cc697b18"] + .unchecked_into(), + //5CZjurB78XbSHf6SLkLhCdkqw52Zm7aBYUDdfkLqEDWJ9Zhj + hex!["162508accd470e379b04cb0c7c60b35a7d5357e84407a89ed2dd48db4b726960"] + .unchecked_into(), + //5DkAqCtSjUMVoJFauuGoAbSEgn2aFCRGziKJiLGpPwYgE1pS + hex!["4a559c028b69a7f784ce553393e547bec0aa530352157603396d515f9c83463b"] + .unchecked_into(), + //5GsBt9MhGwkg8Jfb1F9LAy2kcr88WNyNy4L5ezwbCr8NWKQU + hex!["d464908266c878acbf181bf8fda398b3aa3fd2d05508013e414aaece4cf0d702"] + .unchecked_into(), + //5DtJVkz8AHevEnpszy3X4dUcPvACW6x1qBMQZtFxjexLr5bq + hex!["02fdf30222d2cb88f2376d558d3de9cb83f9fde3aa4b2dd40c93e3104e3488bcd2"] + .unchecked_into(), + ), + ( + //5E2cob2jrXsBkTih56pizwSqENjE4siaVdXhaD6akLdDyVq7 + hex!["56e0f73c563d49ee4a3971c393e17c44eaa313dabad7fcf297dc3271d803f303"].into(), + //5D4rNYgP9uFNi5GMyDEXTfiaFLjXyDEEX2VvuqBVi3f1qgCh + hex!["2c58e5e1d5aef77774480cead4f6876b1a1a6261170166995184d7f86140572b"].into(), + //5Ea2D65KXqe625sz4uV1jjhSfuigVnkezC8VgEj9LXN7ERAk + hex!["6ed45cb7af613be5d88a2622921e18d147225165f24538af03b93f2a03ce6e13"] + .unchecked_into(), + //5G4kCbgqUhEyrRHCyFwFEkgBZXoYA8sbgsRxT9rY8Tp5Jj5F + hex!["b0f8d2b9e4e1eafd4dab6358e0b9d5380d78af27c094e69ae9d6d30ca300fd86"] + .unchecked_into(), + //5CS7thd2n54WfqeKU3cjvZzK4z5p7zku1Zw97mSzXgPioAAs + hex!["1055100a283968271a0781450b389b9093231be809be1e48a305ebad2a90497e"] + .unchecked_into(), + //5DSaL4ZmSYarZSazhL5NQh7LT6pWhNRDcefk2QS9RxEXfsJe + hex!["3cea4ab74bab4adf176cf05a6e18c1599a7bc217d4c6c217275bfbe3b037a527"] + .unchecked_into(), + //5CaNLkYEbFYXZodXhd3UjV6RNLjFGNLiYafc8X5NooMkZiAq + hex!["169faa81aebfe74533518bda28567f2e2664014c8905aa07ea003336afda5a58"] + 
.unchecked_into(), + //5ERwhKiePayukzZStMuzGzRJGxGRFpwxYUXVarQpMSMrXzDS + hex!["03429d0d20f6ac5ca8b349f04d014f7b5b864acf382a744104d5d9a51108156c0f"] + .unchecked_into(), + ), + ( + //5H6j9ovzYk9opckVjvM9SvVfaK37ASTtPTzWeRfqk1tgLJUN + hex!["deb804ed2ed2bb696a3dd4ed7de4cd5c496528a2b204051c6ace385bacd66a3a"].into(), + //5DJ51tMW916mGwjMpfS1o9skcNt6Sb28YnZQXaKVg4h89agE + hex!["366da6a748afedb31f07902f2de36ab265beccee37762d3ae1f237de234d9c36"].into(), + //5CSPYDYoCDGSoSLgSp4EHkJ52YasZLHG2woqhPZkdbtNQpke + hex!["1089bc0cd60237d061872925e81d36c9d9205d250d5d8b542c8e08a8ecf1b911"] + .unchecked_into(), + //5ChfdrAqmLjCeDJvynbMjcxYLHYzPe8UWXd3HnX9JDThUMbn + hex!["1c309a70b4e274314b84c9a0a1f973c9c4fc084df5479ef686c54b1ae4950424"] + .unchecked_into(), + //5D8C3HHEp5E8fJsXRD56494F413CdRSR9QKGXe7v5ZEfymdj + hex!["2ee4d78f328db178c54f205ac809da12e291a33bcbd4f29f081ce7e74bdc5044"] + .unchecked_into(), + //5GxeTYCGmp1C3ZRLDkRWqJc6gB2GYmuqnygweuH3vsivMQq6 + hex!["d88e40e3c2c7a7c5abf96ffdd8f7b7bec8798cc277bc97e255881871ab73b529"] + .unchecked_into(), + //5DoGpsgSLcJsHa9B8V4PKjxegWAqDZttWfxicAd68prUX654 + hex!["4cb3863271b70daa38612acd5dae4f5afcb7c165fa277629e5150d2214df322a"] + .unchecked_into(), + //5G1KLjqFyMsPAodnjSRkwRFJztTTEzmZWxow2Q3ZSRCPdthM + hex!["03be5ec86d10a94db89c9b7a396d3c7742e3bec5f85159d4cf308cef505966ddf5"] + .unchecked_into(), + ), + ]); + + const ENDOWMENT: u128 = 1_000_000 * WND; + const STASH: u128 = 100 * WND; + + build_struct_json_patch!(RuntimeGenesisConfig { + balances: BalancesConfig { + balances: endowed_accounts + .iter() + .map(|k: &AccountId| (k.clone(), ENDOWMENT)) + .chain(initial_authorities.iter().map(|x| (x.0.clone(), STASH))) + .collect::>(), + }, + session: SessionConfig { + keys: initial_authorities + .iter() + .map(|x| { + ( + x.0.clone(), + x.0.clone(), + westend_session_keys( + x.2.clone(), + x.3.clone(), + x.4.clone(), + x.5.clone(), + x.6.clone(), + x.7.clone(), + ), + ) + }) + .collect::>(), + }, + + babe: BabeConfig { epoch_config: 
BABE_GENESIS_EPOCH_CONFIG }, + sudo: SudoConfig { key: Some(endowed_accounts[0].clone()) }, + configuration: ConfigurationConfig { config: default_parachains_host_configuration() }, + registrar: RegistrarConfig { next_free_para_id: polkadot_primitives::LOWEST_PUBLIC_ID }, + }) +} + +//development +fn westend_development_config_genesis() -> serde_json::Value { + westend_testnet_genesis( + Vec::from([get_authority_keys_from_seed("Alice")]), + Sr25519Keyring::Alice.to_account_id(), + None, + ) +} + +//local_testnet +fn westend_local_testnet_genesis() -> serde_json::Value { + westend_testnet_genesis( + Vec::from([get_authority_keys_from_seed("Alice"), get_authority_keys_from_seed("Bob")]), + Sr25519Keyring::Alice.to_account_id(), + None, + ) +} + +/// Provides the JSON representation of predefined genesis config for given `id`. +pub fn get_preset(id: &PresetId) -> Option> { + let patch = match id.as_ref() { + sp_genesis_builder::LOCAL_TESTNET_RUNTIME_PRESET => westend_local_testnet_genesis(), + sp_genesis_builder::DEV_RUNTIME_PRESET => westend_development_config_genesis(), + "staging_testnet" => westend_staging_testnet_config_genesis(), + _ => return None, + }; + Some( + serde_json::to_string(&patch) + .expect("serialization to json is expected to work. qed.") + .into_bytes(), + ) +} + +/// List of supported presets. +pub fn preset_names() -> Vec { + vec![ + PresetId::from(sp_genesis_builder::LOCAL_TESTNET_RUNTIME_PRESET), + PresetId::from(sp_genesis_builder::DEV_RUNTIME_PRESET), + PresetId::from("staging_testnet"), + ] +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/governance/mod.rs b/substrate/frame/staking-async/runtimes/rc/src/governance/mod.rs new file mode 100644 index 0000000000000..47f1f8c2e98a7 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/governance/mod.rs @@ -0,0 +1,100 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. 
+ +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! New governance configurations for the Kusama runtime. + +use super::*; +use crate::xcm_config::Collectives; +use frame_support::{parameter_types, traits::EitherOf}; +use frame_system::EnsureRootWithSuccess; +use pallet_xcm::{EnsureXcm, IsVoiceOfBody}; +use xcm::latest::BodyId; + +mod origins; +pub use origins::{ + pallet_custom_origins, AuctionAdmin, FellowshipAdmin, GeneralAdmin, LeaseAdmin, + ReferendumCanceller, ReferendumKiller, Spender, StakingAdmin, Treasurer, WhitelistedCaller, +}; +mod tracks; +pub use tracks::TracksInfo; + +parameter_types! { + pub const VoteLockingPeriod: BlockNumber = 7 * DAYS; +} + +impl pallet_conviction_voting::Config for Runtime { + type WeightInfo = weights::pallet_conviction_voting::WeightInfo; + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type VoteLockingPeriod = VoteLockingPeriod; + type MaxVotes = ConstU32<512>; + type MaxTurnout = + frame_support::traits::tokens::currency::ActiveIssuanceOf; + type Polls = Referenda; + type BlockNumberProvider = System; + type VotingHooks = (); +} + +parameter_types! { + pub const AlarmInterval: BlockNumber = 1; + pub const SubmissionDeposit: Balance = 1 * 3 * CENTS; + pub const UndecidingTimeout: BlockNumber = 14 * DAYS; +} + +parameter_types! 
{ + pub const MaxBalance: Balance = Balance::max_value(); +} +pub type TreasurySpender = EitherOf, Spender>; + +impl origins::pallet_custom_origins::Config for Runtime {} + +parameter_types! { + // Fellows pluralistic body. + pub const FellowsBodyId: BodyId = BodyId::Technical; +} + +impl pallet_whitelist::Config for Runtime { + type WeightInfo = weights::pallet_whitelist::WeightInfo; + type RuntimeCall = RuntimeCall; + type RuntimeEvent = RuntimeEvent; + type WhitelistOrigin = EitherOfDiverse< + EnsureRoot, + EnsureXcm>, + >; + type DispatchWhitelistedOrigin = EitherOf, WhitelistedCaller>; + type Preimages = Preimage; +} + +impl pallet_referenda::Config for Runtime { + type WeightInfo = weights::pallet_referenda_referenda::WeightInfo; + type RuntimeCall = RuntimeCall; + type RuntimeEvent = RuntimeEvent; + type Scheduler = Scheduler; + type Currency = Balances; + type SubmitOrigin = frame_system::EnsureSigned; + type CancelOrigin = EitherOf, ReferendumCanceller>; + type KillOrigin = EitherOf, ReferendumKiller>; + type Slash = Treasury; + type Votes = pallet_conviction_voting::VotesOf; + type Tally = pallet_conviction_voting::TallyOf; + type SubmissionDeposit = SubmissionDeposit; + type MaxQueued = ConstU32<100>; + type UndecidingTimeout = UndecidingTimeout; + type AlarmInterval = AlarmInterval; + type Tracks = TracksInfo; + type Preimages = Preimage; + type BlockNumberProvider = System; +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/governance/origins.rs b/substrate/frame/staking-async/runtimes/rc/src/governance/origins.rs new file mode 100644 index 0000000000000..dfcac957bb1a6 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/governance/origins.rs @@ -0,0 +1,204 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. 
+ +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Custom origins for governance interventions. + +pub use pallet_custom_origins::*; + +#[frame_support::pallet] +pub mod pallet_custom_origins { + use crate::{Balance, CENTS, GRAND}; + use frame_support::pallet_prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + #[derive( + PartialEq, + Eq, + Clone, + MaxEncodedLen, + Encode, + Decode, + DecodeWithMemTracking, + TypeInfo, + RuntimeDebug, + )] + #[pallet::origin] + pub enum Origin { + /// Origin for cancelling slashes. + StakingAdmin, + /// Origin for spending (any amount of) funds. + Treasurer, + /// Origin for managing the composition of the fellowship. + FellowshipAdmin, + /// Origin for managing the registrar. + GeneralAdmin, + /// Origin for starting auctions. + AuctionAdmin, + /// Origin able to force slot leases. + LeaseAdmin, + /// Origin able to cancel referenda. + ReferendumCanceller, + /// Origin able to kill referenda. + ReferendumKiller, + /// Origin able to spend up to 1 KSM from the treasury at once. + SmallTipper, + /// Origin able to spend up to 5 KSM from the treasury at once. + BigTipper, + /// Origin able to spend up to 50 KSM from the treasury at once. + SmallSpender, + /// Origin able to spend up to 500 KSM from the treasury at once. 
+ MediumSpender, + /// Origin able to spend up to 5,000 KSM from the treasury at once. + BigSpender, + /// Origin able to dispatch a whitelisted call. + WhitelistedCaller, + /// Origin commanded by any members of the Polkadot Fellowship (no Dan grade needed). + FellowshipInitiates, + /// Origin commanded by Polkadot Fellows (3rd Dan fellows or greater). + Fellows, + /// Origin commanded by Polkadot Experts (5th Dan fellows or greater). + FellowshipExperts, + /// Origin commanded by Polkadot Masters (7th Dan fellows of greater). + FellowshipMasters, + /// Origin commanded by rank 1 of the Polkadot Fellowship and with a success of 1. + Fellowship1Dan, + /// Origin commanded by rank 2 of the Polkadot Fellowship and with a success of 2. + Fellowship2Dan, + /// Origin commanded by rank 3 of the Polkadot Fellowship and with a success of 3. + Fellowship3Dan, + /// Origin commanded by rank 4 of the Polkadot Fellowship and with a success of 4. + Fellowship4Dan, + /// Origin commanded by rank 5 of the Polkadot Fellowship and with a success of 5. + Fellowship5Dan, + /// Origin commanded by rank 6 of the Polkadot Fellowship and with a success of 6. + Fellowship6Dan, + /// Origin commanded by rank 7 of the Polkadot Fellowship and with a success of 7. + Fellowship7Dan, + /// Origin commanded by rank 8 of the Polkadot Fellowship and with a success of 8. + Fellowship8Dan, + /// Origin commanded by rank 9 of the Polkadot Fellowship and with a success of 9. + Fellowship9Dan, + } + + macro_rules! decl_unit_ensures { + ( $name:ident: $success_type:ty = $success:expr ) => { + pub struct $name; + impl> + From> + EnsureOrigin for $name + { + type Success = $success_type; + fn try_origin(o: O) -> Result { + o.into().and_then(|o| match o { + Origin::$name => Ok($success), + r => Err(O::from(r)), + }) + } + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin() -> Result { + Ok(O::from(Origin::$name)) + } + } + }; + ( $name:ident ) => { decl_unit_ensures! 
{ $name : () = () } }; + ( $name:ident: $success_type:ty = $success:expr, $( $rest:tt )* ) => { + decl_unit_ensures! { $name: $success_type = $success } + decl_unit_ensures! { $( $rest )* } + }; + ( $name:ident, $( $rest:tt )* ) => { + decl_unit_ensures! { $name } + decl_unit_ensures! { $( $rest )* } + }; + () => {} + } + decl_unit_ensures!( + StakingAdmin, + Treasurer, + FellowshipAdmin, + GeneralAdmin, + AuctionAdmin, + LeaseAdmin, + ReferendumCanceller, + ReferendumKiller, + WhitelistedCaller, + FellowshipInitiates: u16 = 0, + Fellows: u16 = 3, + FellowshipExperts: u16 = 5, + FellowshipMasters: u16 = 7, + ); + + macro_rules! decl_ensure { + ( + $vis:vis type $name:ident: EnsureOrigin { + $( $item:ident = $success:expr, )* + } + ) => { + $vis struct $name; + impl> + From> + EnsureOrigin for $name + { + type Success = $success_type; + fn try_origin(o: O) -> Result { + o.into().and_then(|o| match o { + $( + Origin::$item => Ok($success), + )* + r => Err(O::from(r)), + }) + } + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin() -> Result { + // By convention the more privileged origins go later, so for greatest chance + // of success, we want the last one. + let _result: Result = Err(()); + $( + let _result: Result = Ok(O::from(Origin::$item)); + )* + _result + } + } + } + } + + decl_ensure! { + pub type Spender: EnsureOrigin { + SmallTipper = 250 * 3 * CENTS, + BigTipper = 1 * GRAND, + SmallSpender = 10 * GRAND, + MediumSpender = 100 * GRAND, + BigSpender = 1_000 * GRAND, + Treasurer = 10_000 * GRAND, + } + } + + decl_ensure! 
{ + pub type EnsureFellowship: EnsureOrigin { + Fellowship1Dan = 1, + Fellowship2Dan = 2, + Fellowship3Dan = 3, + Fellowship4Dan = 4, + Fellowship5Dan = 5, + Fellowship6Dan = 6, + Fellowship7Dan = 7, + Fellowship8Dan = 8, + Fellowship9Dan = 9, + } + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/governance/tracks.rs b/substrate/frame/staking-async/runtimes/rc/src/governance/tracks.rs new file mode 100644 index 0000000000000..f674233e09bff --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/governance/tracks.rs @@ -0,0 +1,325 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Track configurations for governance. 
+ +use super::*; + +use alloc::borrow::Cow; +use sp_runtime::str_array as s; + +const fn percent(x: i32) -> sp_arithmetic::FixedI64 { + sp_arithmetic::FixedI64::from_rational(x as u128, 100) +} +use pallet_referenda::Curve; +const APP_ROOT: Curve = Curve::make_reciprocal(4, 28, percent(80), percent(50), percent(100)); +const SUP_ROOT: Curve = Curve::make_linear(28, 28, percent(0), percent(50)); +const APP_STAKING_ADMIN: Curve = Curve::make_linear(17, 28, percent(50), percent(100)); +const SUP_STAKING_ADMIN: Curve = + Curve::make_reciprocal(12, 28, percent(1), percent(0), percent(50)); +const APP_TREASURER: Curve = Curve::make_reciprocal(4, 28, percent(80), percent(50), percent(100)); +const SUP_TREASURER: Curve = Curve::make_linear(28, 28, percent(0), percent(50)); +const APP_FELLOWSHIP_ADMIN: Curve = Curve::make_linear(17, 28, percent(50), percent(100)); +const SUP_FELLOWSHIP_ADMIN: Curve = + Curve::make_reciprocal(12, 28, percent(1), percent(0), percent(50)); +const APP_GENERAL_ADMIN: Curve = + Curve::make_reciprocal(4, 28, percent(80), percent(50), percent(100)); +const SUP_GENERAL_ADMIN: Curve = + Curve::make_reciprocal(7, 28, percent(10), percent(0), percent(50)); +const APP_AUCTION_ADMIN: Curve = + Curve::make_reciprocal(4, 28, percent(80), percent(50), percent(100)); +const SUP_AUCTION_ADMIN: Curve = + Curve::make_reciprocal(7, 28, percent(10), percent(0), percent(50)); +const APP_LEASE_ADMIN: Curve = Curve::make_linear(17, 28, percent(50), percent(100)); +const SUP_LEASE_ADMIN: Curve = Curve::make_reciprocal(12, 28, percent(1), percent(0), percent(50)); +const APP_REFERENDUM_CANCELLER: Curve = Curve::make_linear(17, 28, percent(50), percent(100)); +const SUP_REFERENDUM_CANCELLER: Curve = + Curve::make_reciprocal(12, 28, percent(1), percent(0), percent(50)); +const APP_REFERENDUM_KILLER: Curve = Curve::make_linear(17, 28, percent(50), percent(100)); +const SUP_REFERENDUM_KILLER: Curve = + Curve::make_reciprocal(12, 28, percent(1), percent(0), percent(50)); 
+const APP_SMALL_TIPPER: Curve = Curve::make_linear(10, 28, percent(50), percent(100)); +const SUP_SMALL_TIPPER: Curve = Curve::make_reciprocal(1, 28, percent(4), percent(0), percent(50)); +const APP_BIG_TIPPER: Curve = Curve::make_linear(10, 28, percent(50), percent(100)); +const SUP_BIG_TIPPER: Curve = Curve::make_reciprocal(8, 28, percent(1), percent(0), percent(50)); +const APP_SMALL_SPENDER: Curve = Curve::make_linear(17, 28, percent(50), percent(100)); +const SUP_SMALL_SPENDER: Curve = + Curve::make_reciprocal(12, 28, percent(1), percent(0), percent(50)); +const APP_MEDIUM_SPENDER: Curve = Curve::make_linear(23, 28, percent(50), percent(100)); +const SUP_MEDIUM_SPENDER: Curve = + Curve::make_reciprocal(16, 28, percent(1), percent(0), percent(50)); +const APP_BIG_SPENDER: Curve = Curve::make_linear(28, 28, percent(50), percent(100)); +const SUP_BIG_SPENDER: Curve = Curve::make_reciprocal(20, 28, percent(1), percent(0), percent(50)); +const APP_WHITELISTED_CALLER: Curve = + Curve::make_reciprocal(16, 28 * 24, percent(96), percent(50), percent(100)); +const SUP_WHITELISTED_CALLER: Curve = + Curve::make_reciprocal(1, 28, percent(20), percent(5), percent(50)); + +const TRACKS_DATA: [pallet_referenda::Track; 15] = [ + pallet_referenda::Track { + id: 0, + info: pallet_referenda::TrackInfo { + name: s("root"), + max_deciding: 1, + decision_deposit: 100 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 12 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: APP_ROOT, + min_support: SUP_ROOT, + }, + }, + pallet_referenda::Track { + id: 1, + info: pallet_referenda::TrackInfo { + name: s("whitelisted_caller"), + max_deciding: 100, + decision_deposit: 10 * GRAND, + prepare_period: 6 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 4 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_WHITELISTED_CALLER, + min_support: SUP_WHITELISTED_CALLER, + }, + }, + pallet_referenda::Track { + id: 10, + 
info: pallet_referenda::TrackInfo { + name: s("staking_admin"), + max_deciding: 10, + decision_deposit: 5 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 8 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_STAKING_ADMIN, + min_support: SUP_STAKING_ADMIN, + }, + }, + pallet_referenda::Track { + id: 11, + info: pallet_referenda::TrackInfo { + name: s("treasurer"), + max_deciding: 10, + decision_deposit: 1 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 8 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: APP_TREASURER, + min_support: SUP_TREASURER, + }, + }, + pallet_referenda::Track { + id: 12, + info: pallet_referenda::TrackInfo { + name: s("lease_admin"), + max_deciding: 10, + decision_deposit: 5 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 8 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_LEASE_ADMIN, + min_support: SUP_LEASE_ADMIN, + }, + }, + pallet_referenda::Track { + id: 13, + info: pallet_referenda::TrackInfo { + name: s("fellowship_admin"), + max_deciding: 10, + decision_deposit: 5 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 8 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_FELLOWSHIP_ADMIN, + min_support: SUP_FELLOWSHIP_ADMIN, + }, + }, + pallet_referenda::Track { + id: 14, + info: pallet_referenda::TrackInfo { + name: s("general_admin"), + max_deciding: 10, + decision_deposit: 5 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 8 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_GENERAL_ADMIN, + min_support: SUP_GENERAL_ADMIN, + }, + }, + pallet_referenda::Track { + id: 15, + info: pallet_referenda::TrackInfo { + name: s("auction_admin"), + max_deciding: 10, + decision_deposit: 5 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 20 * MINUTES, 
+ confirm_period: 8 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_AUCTION_ADMIN, + min_support: SUP_AUCTION_ADMIN, + }, + }, + pallet_referenda::Track { + id: 20, + info: pallet_referenda::TrackInfo { + name: s("referendum_canceller"), + max_deciding: 1_000, + decision_deposit: 10 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 14 * MINUTES, + confirm_period: 8 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_REFERENDUM_CANCELLER, + min_support: SUP_REFERENDUM_CANCELLER, + }, + }, + pallet_referenda::Track { + id: 21, + info: pallet_referenda::TrackInfo { + name: s("referendum_killer"), + max_deciding: 1_000, + decision_deposit: 50 * GRAND, + prepare_period: 8 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 8 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_REFERENDUM_KILLER, + min_support: SUP_REFERENDUM_KILLER, + }, + }, + pallet_referenda::Track { + id: 30, + info: pallet_referenda::TrackInfo { + name: s("small_tipper"), + max_deciding: 200, + decision_deposit: 1 * 3 * CENTS, + prepare_period: 1 * MINUTES, + decision_period: 14 * MINUTES, + confirm_period: 4 * MINUTES, + min_enactment_period: 1 * MINUTES, + min_approval: APP_SMALL_TIPPER, + min_support: SUP_SMALL_TIPPER, + }, + }, + pallet_referenda::Track { + id: 31, + info: pallet_referenda::TrackInfo { + name: s("big_tipper"), + max_deciding: 100, + decision_deposit: 10 * 3 * CENTS, + prepare_period: 4 * MINUTES, + decision_period: 14 * MINUTES, + confirm_period: 12 * MINUTES, + min_enactment_period: 3 * MINUTES, + min_approval: APP_BIG_TIPPER, + min_support: SUP_BIG_TIPPER, + }, + }, + pallet_referenda::Track { + id: 32, + info: pallet_referenda::TrackInfo { + name: s("small_spender"), + max_deciding: 50, + decision_deposit: 100 * 3 * CENTS, + prepare_period: 10 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 10 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: APP_SMALL_SPENDER, + min_support: 
SUP_SMALL_SPENDER, + }, + }, + pallet_referenda::Track { + id: 33, + info: pallet_referenda::TrackInfo { + name: s("medium_spender"), + max_deciding: 50, + decision_deposit: 200 * 3 * CENTS, + prepare_period: 10 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 12 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: APP_MEDIUM_SPENDER, + min_support: SUP_MEDIUM_SPENDER, + }, + }, + pallet_referenda::Track { + id: 34, + info: pallet_referenda::TrackInfo { + name: s("big_spender"), + max_deciding: 50, + decision_deposit: 400 * 3 * CENTS, + prepare_period: 10 * MINUTES, + decision_period: 20 * MINUTES, + confirm_period: 14 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: APP_BIG_SPENDER, + min_support: SUP_BIG_SPENDER, + }, + }, +]; + +pub struct TracksInfo; +impl pallet_referenda::TracksInfo for TracksInfo { + type Id = u16; + type RuntimeOrigin = ::PalletsOrigin; + + fn tracks( + ) -> impl Iterator>> + { + TRACKS_DATA.iter().map(Cow::Borrowed) + } + fn track_for(id: &Self::RuntimeOrigin) -> Result { + if let Ok(system_origin) = frame_system::RawOrigin::try_from(id.clone()) { + match system_origin { + frame_system::RawOrigin::Root => Ok(0), + _ => Err(()), + } + } else if let Ok(custom_origin) = origins::Origin::try_from(id.clone()) { + match custom_origin { + origins::Origin::WhitelistedCaller => Ok(1), + // General admin + origins::Origin::StakingAdmin => Ok(10), + origins::Origin::Treasurer => Ok(11), + origins::Origin::LeaseAdmin => Ok(12), + origins::Origin::FellowshipAdmin => Ok(13), + origins::Origin::GeneralAdmin => Ok(14), + origins::Origin::AuctionAdmin => Ok(15), + // Referendum admins + origins::Origin::ReferendumCanceller => Ok(20), + origins::Origin::ReferendumKiller => Ok(21), + // Limited treasury spenders + origins::Origin::SmallTipper => Ok(30), + origins::Origin::BigTipper => Ok(31), + origins::Origin::SmallSpender => Ok(32), + origins::Origin::MediumSpender => Ok(33), + origins::Origin::BigSpender => Ok(34), + 
_ => Err(()), + } + } else { + Err(()) + } + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/impls.rs b/substrate/frame/staking-async/runtimes/rc/src/impls.rs new file mode 100644 index 0000000000000..f7b20081847c3 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/impls.rs @@ -0,0 +1,184 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use crate::xcm_config; +use alloc::{boxed::Box, vec}; +use codec::{Decode, Encode}; +use core::marker::PhantomData; +use frame_support::pallet_prelude::DispatchResult; +use frame_system::RawOrigin; +use pallet_staking_async_rc_runtime_constants::currency::*; +use polkadot_primitives::Balance; +use polkadot_runtime_common::identity_migrator::{OnReapIdentity, WeightInfo}; +use xcm::{latest::prelude::*, VersionedLocation, VersionedXcm}; +use xcm_executor::traits::TransactAsset; + +/// A type containing the encoding of the People Chain pallets in its runtime. Used to construct any +/// remote calls. The codec index must correspond to the index of `IdentityMigrator` in the +/// `construct_runtime` of the remote chain. +#[derive(Encode, Decode)] +enum PeopleRuntimePallets { + #[codec(index = 248)] + IdentityMigrator(IdentityMigratorCalls), +} + +/// Call encoding for the calls needed from the Identity Migrator pallet. 
+#[derive(Encode, Decode)] +enum IdentityMigratorCalls { + #[codec(index = 1)] + PokeDeposit(AccountId), +} + +/// Type that implements `OnReapIdentity` that will send the deposit needed to store the same +/// information on a parachain, sends the deposit there, and then updates it. +pub struct ToParachainIdentityReaper(PhantomData<(Runtime, AccountId)>); +impl ToParachainIdentityReaper { + /// Calculate the balance needed on the remote chain based on the `IdentityInfo` and `Subs` on + /// this chain. The total includes: + /// + /// - Identity basic deposit + /// - `IdentityInfo` byte deposit + /// - Sub accounts deposit + /// - 2x existential deposit (1 for account existence, 1 such that the user can transact) + fn calculate_remote_deposit(bytes: u32, subs: u32) -> Balance { + // Remote deposit constants. Parachain uses `deposit / 100` + // Source: + // https://github.com/paritytech/polkadot-sdk/blob/a146918/cumulus/parachains/common/src/westend.rs#L28 + // + // Parachain Deposit Configuration: + // + // pub const BasicDeposit: Balance = deposit(1, 17); + // pub const ByteDeposit: Balance = deposit(0, 1); + // pub const SubAccountDeposit: Balance = deposit(1, 53); + // pub const EXISTENTIAL_DEPOSIT: Balance = constants::currency::EXISTENTIAL_DEPOSIT / 10; + let para_basic_deposit = deposit(1, 17) / 100; + let para_byte_deposit = deposit(0, 1) / 100; + let para_sub_account_deposit = deposit(1, 53) / 100; + let para_existential_deposit = EXISTENTIAL_DEPOSIT / 10; + + // pallet deposits + let id_deposit = + para_basic_deposit.saturating_add(para_byte_deposit.saturating_mul(bytes as Balance)); + let subs_deposit = para_sub_account_deposit.saturating_mul(subs as Balance); + + id_deposit + .saturating_add(subs_deposit) + .saturating_add(para_existential_deposit.saturating_mul(2)) + } +} + +// Note / Warning: This implementation should only be used in a transactional context. If not, then +// an error could result in assets being burned. 
+impl OnReapIdentity for ToParachainIdentityReaper +where + Runtime: frame_system::Config + pallet_xcm::Config, + AccountId: Into<[u8; 32]> + Clone + Encode, +{ + fn on_reap_identity(who: &AccountId, fields: u32, subs: u32) -> DispatchResult { + use crate::{ + impls::IdentityMigratorCalls::PokeDeposit, + weights::polkadot_runtime_common_identity_migrator::WeightInfo as MigratorWeights, + }; + + let total_to_send = Self::calculate_remote_deposit(fields, subs); + + // define asset / destination from relay perspective + let wnd = Asset { id: AssetId(Here.into_location()), fun: Fungible(total_to_send) }; + // People Chain: ParaId 1004 + let destination: Location = Location::new(0, Parachain(1004)); + + // Do `check_out` accounting since the XCM Executor's `InitiateTeleport` doesn't support + // unpaid teleports. + + // withdraw the asset from `who` + let who_origin = + Junction::AccountId32 { network: None, id: who.clone().into() }.into_location(); + let _withdrawn = xcm_config::LocalAssetTransactor::withdraw_asset(&wnd, &who_origin, None) + .map_err(|err| { + log::error!( + target: "runtime::on_reap_identity", + "withdraw_asset(what: {:?}, who_origin: {:?}) error: {:?}", + wnd, who_origin, err + ); + pallet_xcm::Error::::LowBalance + })?; + + // check out + xcm_config::LocalAssetTransactor::can_check_out( + &destination, + &wnd, + // not used in AssetTransactor + &XcmContext { origin: None, message_id: [0; 32], topic: None }, + ) + .map_err(|err| { + log::error!( + target: "runtime::on_reap_identity", + "can_check_out(destination: {:?}, asset: {:?}, _) error: {:?}", + destination, wnd, err + ); + pallet_xcm::Error::::CannotCheckOutTeleport + })?; + xcm_config::LocalAssetTransactor::check_out( + &destination, + &wnd, + // not used in AssetTransactor + &XcmContext { origin: None, message_id: [0; 32], topic: None }, + ); + + // reanchor + let wnd_reanchored: Assets = + vec![Asset { id: AssetId(Location::new(1, Here)), fun: Fungible(total_to_send) }] + .into(); + + let 
poke = PeopleRuntimePallets::::IdentityMigrator(PokeDeposit(who.clone())); + let remote_weight_limit = MigratorWeights::::poke_deposit().saturating_mul(2); + + // Actual program to execute on People Chain. + let program: Xcm<()> = Xcm(vec![ + // Unpaid as this is constructed by the system, once per user. The user shouldn't have + // their balance reduced by teleport fees for the favor of migrating. + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + // Receive the asset into holding. + ReceiveTeleportedAsset(wnd_reanchored), + // Deposit into the user's account. + DepositAsset { + assets: Wild(AllCounted(1)), + beneficiary: Junction::AccountId32 { network: None, id: who.clone().into() } + .into_location() + .into(), + }, + // Poke the deposit to reserve the appropriate amount on the parachain. + Transact { + origin_kind: OriginKind::Superuser, + call: poke.encode().into(), + fallback_max_weight: Some(remote_weight_limit), + }, + ]); + + // send + let _ = >::send( + RawOrigin::Root.into(), + Box::new(VersionedLocation::from(destination)), + Box::new(VersionedXcm::from(program)), + )?; + Ok(()) + } + + #[cfg(feature = "runtime-benchmarks")] + fn ensure_successful_identity_reaping(_: &AccountId, _: u32, _: u32) { + crate::Dmp::make_parachain_reachable(1004); + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/lib.rs b/substrate/frame/staking-async/runtimes/rc/src/lib.rs new file mode 100644 index 0000000000000..d2e0a68f98a31 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/lib.rs @@ -0,0 +1,2857 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! The Westend runtime. This can be compiled with `#[no_std]`, ready for Wasm. + +#![cfg_attr(not(feature = "std"), no_std)] +// `#[frame_support::runtime]!` does a lot of recursion and requires us to increase the limit. +#![recursion_limit = "512"] + +extern crate alloc; + +use alloc::{ + collections::{btree_map::BTreeMap, vec_deque::VecDeque}, + vec, + vec::Vec, +}; +use codec::{Decode, DecodeWithMemTracking, Encode, MaxEncodedLen}; +use frame_election_provider_support::{ + bounds::ElectionBoundsBuilder, onchain, SequentialPhragmen, VoteWeight, +}; +use frame_support::{ + derive_impl, + dynamic_params::{dynamic_pallet_params, dynamic_params}, + genesis_builder_helper::{build_state, get_preset}, + pallet_prelude::PhantomData, + parameter_types, + traits::{ + fungible::HoldConsideration, tokens::UnityOrOuterConversion, ConstBool, ConstU32, Contains, + EitherOf, EitherOfDiverse, EnsureOriginWithArg, EverythingBut, FromContains, + InstanceFilter, KeyOwnerProofSystem, LinearStoragePrice, Nothing, ProcessMessage, + ProcessMessageError, VariantCountOf, WithdrawReasons, + }, + weights::{ConstantMultiplier, WeightMeter, WeightToFee as _}, + PalletId, +}; +pub use frame_system::Call as SystemCall; +use frame_system::{EnsureRoot, EnsureSigned}; +pub use pallet_balances::Call as BalancesCall; +use pallet_grandpa::{fg_primitives, AuthorityId as GrandpaId}; +use pallet_identity::legacy::IdentityInfo; +use pallet_session::historical as session_historical; +use pallet_staking_async_ah_client::{self as ah_client}; +use pallet_staking_async_rc_client::{self as rc_client}; +pub use pallet_timestamp::Call as TimestampCall; 
+use pallet_transaction_payment::{FeeDetails, FungibleAdapter, RuntimeDispatchInfo}; +use polkadot_primitives::{ + slashing, + vstaging::{ + async_backing::Constraints, CandidateEvent, + CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CoreState, ScrapedOnChainVotes, + }, + AccountId, AccountIndex, ApprovalVotingParams, Balance, BlockNumber, CandidateHash, CoreIndex, + DisputeState, ExecutorParams, GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, + InboundHrmpMessage, Moment, NodeFeatures, Nonce, OccupiedCoreAssumption, + PersistedValidationData, PvfCheckStatement, SessionInfo, Signature, ValidationCode, + ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, PARACHAIN_KEY_TYPE_ID, +}; +use polkadot_runtime_common::{ + assigned_slots, auctions, crowdloan, identity_migrator, impl_runtime_weights, + impls::{ + ContainsParts, LocatableAssetConverter, ToAuthor, VersionedLocatableAsset, + VersionedLocationConverter, + }, + paras_registrar, paras_sudo_wrapper, prod_or_fast, slots, + traits::OnSwap, + BlockHashCount, BlockLength, SlowAdjustingFeeUpdate, +}; +use polkadot_runtime_parachains::{ + assigner_coretime as parachains_assigner_coretime, configuration as parachains_configuration, + configuration::ActiveConfigHrmpChannelSizeAndCapacityRatio, + coretime, disputes as parachains_disputes, + disputes::slashing as parachains_slashing, + dmp as parachains_dmp, hrmp as parachains_hrmp, inclusion as parachains_inclusion, + inclusion::{AggregateMessageOrigin, UmpQueueId}, + initializer as parachains_initializer, on_demand as parachains_on_demand, + origin as parachains_origin, paras as parachains_paras, + paras_inherent as parachains_paras_inherent, reward_points as parachains_reward_points, + runtime_api_impl::{ + v11 as parachains_runtime_api_impl, vstaging as parachains_staging_runtime_api_impl, + }, + scheduler as parachains_scheduler, session_info as parachains_session_info, + shared as parachains_shared, +}; +use 
scale_info::TypeInfo; +use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; +use sp_consensus_beefy::{ + ecdsa_crypto::{AuthorityId as BeefyId, Signature as BeefySignature}, + mmr::{BeefyDataProvider, MmrLeafVersion}, +}; +use sp_core::{ConstU8, OpaqueMetadata, RuntimeDebug, H256}; +#[cfg(any(feature = "std", test))] +pub use sp_runtime::BuildStorage; +use sp_runtime::{ + generic, impl_opaque_keys, + traits::{ + AccountIdConversion, BlakeTwo256, Block as BlockT, ConvertInto, Get, IdentityLookup, + Keccak256, OpaqueKeys, SaturatedConversion, Verify, + }, + transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity}, + ApplyExtrinsicResult, FixedU128, KeyTypeId, Percent, Permill, +}; +use sp_staking::SessionIndex; +#[cfg(any(feature = "std", test))] +use sp_version::NativeVersion; +use sp_version::RuntimeVersion; +use xcm::{ + latest::prelude::*, VersionedAsset, VersionedAssetId, VersionedAssets, VersionedLocation, + VersionedXcm, +}; +use xcm_builder::PayOverXcm; +use xcm_runtime_apis::{ + dry_run::{CallDryRunEffects, Error as XcmDryRunApiError, XcmDryRunEffects}, + fees::Error as XcmPaymentApiError, +}; + +/// Constant values used within the runtime. +use pallet_staking_async_rc_runtime_constants::{ + currency::*, + fee::*, + system_parachain::{coretime::TIMESLICE_PERIOD, ASSET_HUB_ID, BROKER_ID}, + time::*, +}; + +mod genesis_config_presets; +mod weights; +pub mod xcm_config; + +// Implemented types. +mod impls; +use impls::ToParachainIdentityReaper; + +// Governance and configurations. +pub mod governance; +use governance::{ + pallet_custom_origins, AuctionAdmin, FellowshipAdmin, GeneralAdmin, LeaseAdmin, StakingAdmin, + Treasurer, TreasurySpender, +}; + +#[cfg(test)] +mod tests; + +impl_runtime_weights!(pallet_staking_async_rc_runtime_constants); + +// Make the WASM binary available. 
+#[cfg(feature = "std")] +include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); + +#[cfg(feature = "std")] +pub mod fast_runtime_binary { + include!(concat!(env!("OUT_DIR"), "/fast_runtime_binary.rs")); +} + +/// Runtime version (Westend). +#[sp_version::runtime_version] +pub const VERSION: RuntimeVersion = RuntimeVersion { + spec_name: alloc::borrow::Cow::Borrowed("westend-next"), + impl_name: alloc::borrow::Cow::Borrowed("parity-westend"), + authoring_version: 2, + spec_version: 1_017_001, + impl_version: 0, + apis: RUNTIME_API_VERSIONS, + transaction_version: 27, + system_version: 1, +}; + +/// The BABE epoch configuration at genesis. +pub const BABE_GENESIS_EPOCH_CONFIG: sp_consensus_babe::BabeEpochConfiguration = + sp_consensus_babe::BabeEpochConfiguration { + c: PRIMARY_PROBABILITY, + allowed_slots: sp_consensus_babe::AllowedSlots::PrimaryAndSecondaryVRFSlots, + }; + +/// Native version. +#[cfg(any(feature = "std", test))] +pub fn native_version() -> NativeVersion { + NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } +} + +/// A type to identify calls to the Identity pallet. These will be filtered to prevent invocation, +/// locking the state of the pallet and preventing further updates to identities and sub-identities. +/// The locked state will be the genesis state of a new system chain and then removed from the Relay +/// Chain. +pub struct IsIdentityCall; +impl Contains for IsIdentityCall { + fn contains(c: &RuntimeCall) -> bool { + matches!(c, RuntimeCall::Identity(_)) + } +} + +parameter_types! 
{ + pub const Version: RuntimeVersion = VERSION; + pub const SS58Prefix: u8 = 42; +} + +#[derive_impl(frame_system::config_preludes::RelayChainDefaultConfig)] +impl frame_system::Config for Runtime { + type BaseCallFilter = EverythingBut; + type BlockWeights = BlockWeights; + type BlockLength = BlockLength; + type Nonce = Nonce; + type Hash = Hash; + type AccountId = AccountId; + type Block = Block; + type BlockHashCount = BlockHashCount; + type DbWeight = RocksDbWeight; + type Version = Version; + type AccountData = pallet_balances::AccountData; + type SystemWeightInfo = weights::frame_system::WeightInfo; + type ExtensionsWeightInfo = weights::frame_system_extensions::WeightInfo; + type SS58Prefix = SS58Prefix; + type MaxConsumers = frame_support::traits::ConstU32<16>; + type MultiBlockMigrator = MultiBlockMigrations; +} + +parameter_types! { + pub MaximumSchedulerWeight: frame_support::weights::Weight = Perbill::from_percent(80) * + BlockWeights::get().max_block; + pub const MaxScheduledPerBlock: u32 = 50; + pub const NoPreimagePostponement: Option = Some(10); +} + +impl pallet_scheduler::Config for Runtime { + type RuntimeOrigin = RuntimeOrigin; + type RuntimeEvent = RuntimeEvent; + type PalletsOrigin = OriginCaller; + type RuntimeCall = RuntimeCall; + type MaximumWeight = MaximumSchedulerWeight; + // The goal of having ScheduleOrigin include AuctionAdmin is to allow the auctions track of + // OpenGov to schedule periodic auctions. + type ScheduleOrigin = EitherOf, AuctionAdmin>; + type MaxScheduledPerBlock = MaxScheduledPerBlock; + type WeightInfo = weights::pallet_scheduler::WeightInfo; + type OriginPrivilegeCmp = frame_support::traits::EqualPrivilegeOnly; + type Preimages = Preimage; + type BlockNumberProvider = frame_system::Pallet; +} + +parameter_types! 
{ + pub const PreimageBaseDeposit: Balance = deposit(2, 64); + pub const PreimageByteDeposit: Balance = deposit(0, 1); + pub const PreimageHoldReason: RuntimeHoldReason = RuntimeHoldReason::Preimage(pallet_preimage::HoldReason::Preimage); +} + +/// Dynamic params that can be adjusted at runtime. +#[dynamic_params(RuntimeParameters, pallet_parameters::Parameters::)] +pub mod dynamic_params { + use super::*; + + /// Parameters used to calculate era payouts, see + /// [`polkadot_runtime_common::impls::EraPayoutParams`]. + #[dynamic_pallet_params] + #[codec(index = 0)] + pub mod inflation { + /// Minimum inflation rate used to calculate era payouts. + #[codec(index = 0)] + pub static MinInflation: Perquintill = Perquintill::from_rational(25u64, 1000u64); + + /// Maximum inflation rate used to calculate era payouts. + #[codec(index = 1)] + pub static MaxInflation: Perquintill = Perquintill::from_rational(10u64, 100u64); + + /// Ideal stake ratio used to calculate era payouts. + #[codec(index = 2)] + pub static IdealStake: Perquintill = Perquintill::from_rational(50u64, 100u64); + + /// Falloff used to calculate era payouts. + #[codec(index = 3)] + pub static Falloff: Perquintill = Perquintill::from_rational(50u64, 1000u64); + + /// Whether to use auction slots or not in the calculation of era payouts. If set to true, + /// the `legacy_auction_proportion` of 60% will be used in the calculation of era payouts. 
+ #[codec(index = 4)] + pub static UseAuctionSlots: bool = false; + } +} + +#[cfg(feature = "runtime-benchmarks")] +impl Default for RuntimeParameters { + fn default() -> Self { + RuntimeParameters::Inflation(dynamic_params::inflation::Parameters::MinInflation( + dynamic_params::inflation::MinInflation, + Some(Perquintill::from_rational(25u64, 1000u64)), + )) + } +} + +impl pallet_parameters::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type RuntimeParameters = RuntimeParameters; + type AdminOrigin = DynamicParameterOrigin; + type WeightInfo = weights::pallet_parameters::WeightInfo; +} + +/// Defines what origin can modify which dynamic parameters. +pub struct DynamicParameterOrigin; +impl EnsureOriginWithArg for DynamicParameterOrigin { + type Success = (); + + fn try_origin( + origin: RuntimeOrigin, + key: &RuntimeParametersKey, + ) -> Result { + use crate::RuntimeParametersKey::*; + + match key { + Inflation(_) => frame_system::ensure_root(origin.clone()), + } + .map_err(|_| origin) + } + + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin(_key: &RuntimeParametersKey) -> Result { + // Provide the origin for the parameter returned by `Default`: + Ok(RuntimeOrigin::root()) + } +} + +impl pallet_preimage::Config for Runtime { + type WeightInfo = weights::pallet_preimage::WeightInfo; + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type ManagerOrigin = EnsureRoot; + type Consideration = HoldConsideration< + AccountId, + Balances, + PreimageHoldReason, + LinearStoragePrice, + >; +} + +parameter_types! 
{ + pub const EpochDuration: u64 = prod_or_fast!( + EPOCH_DURATION_IN_SLOTS as u64, + 1 * MINUTES as u64 + ); + pub const ExpectedBlockTime: Moment = MILLISECS_PER_BLOCK; + pub const ReportLongevity: u64 = 256 * EpochDuration::get(); +} + +impl pallet_babe::Config for Runtime { + type EpochDuration = EpochDuration; + type ExpectedBlockTime = ExpectedBlockTime; + + // session module is the trigger + type EpochChangeTrigger = pallet_babe::ExternalTrigger; + + type DisabledValidators = Session; + + type WeightInfo = (); + + type MaxAuthorities = MaxAuthorities; + type MaxNominators = ConstU32<1024>; + + type KeyOwnerProof = sp_session::MembershipProof; + + type EquivocationReportSystem = + pallet_babe::EquivocationReportSystem; +} + +parameter_types! { + pub const IndexDeposit: Balance = 100 * CENTS; +} + +impl pallet_indices::Config for Runtime { + type AccountIndex = AccountIndex; + type Currency = Balances; + type Deposit = IndexDeposit; + type RuntimeEvent = RuntimeEvent; + type WeightInfo = weights::pallet_indices::WeightInfo; +} + +parameter_types! { + pub const ExistentialDeposit: Balance = EXISTENTIAL_DEPOSIT; + pub const MaxLocks: u32 = 50; + pub const MaxReserves: u32 = 50; +} + +impl pallet_balances::Config for Runtime { + type Balance = Balance; + type DustRemoval = (); + type RuntimeEvent = RuntimeEvent; + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type MaxLocks = MaxLocks; + type MaxReserves = MaxReserves; + type ReserveIdentifier = [u8; 8]; + type WeightInfo = weights::pallet_balances::WeightInfo; + type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; + type FreezeIdentifier = RuntimeFreezeReason; + type MaxFreezes = VariantCountOf; + type DoneSlashHandler = (); +} + +parameter_types! 
{ + pub const BeefySetIdSessionEntries: u32 = 1024; +} + +impl pallet_beefy::Config for Runtime { + type BeefyId = BeefyId; + type MaxAuthorities = MaxAuthorities; + type MaxNominators = ::MaxNominators; + type MaxSetIdSessionEntries = BeefySetIdSessionEntries; + type OnNewValidatorSet = BeefyMmrLeaf; + type AncestryHelper = BeefyMmrLeaf; + type WeightInfo = (); + type KeyOwnerProof = sp_session::MembershipProof; + type EquivocationReportSystem = + pallet_beefy::EquivocationReportSystem; +} + +impl pallet_mmr::Config for Runtime { + const INDEXING_PREFIX: &'static [u8] = mmr::INDEXING_PREFIX; + type Hashing = Keccak256; + type OnNewRoot = pallet_beefy_mmr::DepositBeefyDigest; + type LeafData = pallet_beefy_mmr::Pallet; + type BlockHashProvider = pallet_mmr::DefaultBlockHashProvider; + type WeightInfo = weights::pallet_mmr::WeightInfo; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = parachains_paras::benchmarking::mmr_setup::MmrSetup; +} + +/// MMR helper types. +mod mmr { + use super::Runtime; + pub use pallet_mmr::primitives::*; + + pub type Leaf = <::LeafData as LeafDataProvider>::LeafData; + pub type Hashing = ::Hashing; + pub type Hash = ::Output; +} + +parameter_types! { + pub LeafVersion: MmrLeafVersion = MmrLeafVersion::new(0, 0); +} + +/// A BEEFY data provider that merkelizes all the parachain heads at the current block +/// (sorted by their parachain id). 
+pub struct ParaHeadsRootProvider; +impl BeefyDataProvider for ParaHeadsRootProvider { + fn extra_data() -> H256 { + let para_heads: Vec<(u32, Vec)> = + parachains_paras::Pallet::::sorted_para_heads(); + binary_merkle_tree::merkle_root::( + para_heads.into_iter().map(|pair| pair.encode()), + ) + .into() + } +} + +impl pallet_beefy_mmr::Config for Runtime { + type LeafVersion = LeafVersion; + type BeefyAuthorityToMerkleLeaf = pallet_beefy_mmr::BeefyEcdsaToEthereum; + type LeafExtra = H256; + type BeefyDataProvider = ParaHeadsRootProvider; + type WeightInfo = weights::pallet_beefy_mmr::WeightInfo; +} + +parameter_types! { + pub const TransactionByteFee: Balance = 10 * MILLICENTS; + /// This value increases the priority of `Operational` transactions by adding + /// a "virtual tip" that's equal to the `OperationalFeeMultiplier * final_fee`. + pub const OperationalFeeMultiplier: u8 = 5; +} + +impl pallet_transaction_payment::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type OnChargeTransaction = FungibleAdapter>; + type OperationalFeeMultiplier = OperationalFeeMultiplier; + type WeightToFee = WeightToFee; + type LengthToFee = ConstantMultiplier; + type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; + type WeightInfo = weights::pallet_transaction_payment::WeightInfo; +} + +parameter_types! { + pub const MinimumPeriod: u64 = SLOT_DURATION / 2; +} +impl pallet_timestamp::Config for Runtime { + type Moment = u64; + type OnTimestampSet = Babe; + type MinimumPeriod = MinimumPeriod; + type WeightInfo = weights::pallet_timestamp::WeightInfo; +} + +impl pallet_authorship::Config for Runtime { + type FindAuthor = pallet_session::FindAccountFromAuthorIndex; + type EventHandler = StakingAsyncAhClient; +} + +parameter_types! { + pub const Period: BlockNumber = 10 * MINUTES; + pub const Offset: BlockNumber = 0; +} + +impl_opaque_keys! 
{ + pub struct SessionKeys { + pub grandpa: Grandpa, + pub babe: Babe, + pub para_validator: Initializer, + pub para_assignment: ParaSessionInfo, + pub authority_discovery: AuthorityDiscovery, + pub beefy: Beefy, + } +} + +pub struct IdentityValidatorIdeOf; +impl sp_runtime::traits::Convert> for IdentityValidatorIdeOf { + fn convert(account: AccountId) -> Option { + Some(account) + } +} + +/// A testing type that implements SessionManager, it receives a new validator set from +/// `StakingAsyncAhClient`, but it prevents them from being passed over to the session pallet and +/// just uses the previous session keys. +pub struct AckButPreviousSessionValidatorsPersist(core::marker::PhantomData); + +impl> pallet_session::SessionManager + for AckButPreviousSessionValidatorsPersist +{ + fn end_session(end_index: SessionIndex) { + >::end_session(end_index); + } + fn new_session(new_index: SessionIndex) -> Option> { + match >::new_session(new_index) { + Some(_new_ignored) => { + let current_validators = pallet_session::Validators::::get(); + log::info!(target: "runtime", ">> received {} validators, but overriding with {} old ones", _new_ignored.len(), current_validators.len()); + Some(current_validators) + }, + None => None, + } + } + fn new_session_genesis(new_index: SessionIndex) -> Option> { + >::new_session_genesis(new_index) + } + fn start_session(start_index: SessionIndex) { + >::start_session(start_index); + } +} + +impl pallet_session::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type ValidatorId = AccountId; + type ValidatorIdOf = IdentityValidatorIdeOf; + type ShouldEndSession = Babe; + type NextSessionRotation = Babe; + type SessionManager = AckButPreviousSessionValidatorsPersist< + session_historical::NoteHistoricalRoot, + >; + type SessionHandler = ::KeyTypeIdProviders; + type Keys = SessionKeys; + type DisablingStrategy = pallet_session::disabling::UpToLimitWithReEnablingDisablingStrategy; + type WeightInfo = weights::pallet_session::WeightInfo; 
+} + +impl session_historical::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type FullIdentification = sp_staking::Exposure; + type FullIdentificationOf = ah_client::DefaultExposureOf; +} + +pub struct AssetHubLocation; +impl Get for AssetHubLocation { + fn get() -> Location { + Location::new(0, [Junction::Parachain(1000)]) + } +} + +pub struct AssetHubNextLocation; +impl Get for AssetHubNextLocation { + fn get() -> Location { + // TODO: once we are done with AH-next, replace with original AH id. + Location::new(0, [Junction::Parachain(1100)]) + } +} + +#[derive(Encode, Decode)] +enum AssetHubRuntimePallets { + #[codec(index = 89)] + RcClient(RcClientCalls), +} + +/// Call encoding for the calls needed from the rc-client pallet. +#[derive(Encode, Decode)] +enum RcClientCalls { + /// A session with the given index has started. + #[codec(index = 0)] + RelaySessionReport(rc_client::SessionReport), + #[codec(index = 1)] + RelayNewOffence(SessionIndex, Vec>), +} + +pub struct XcmToAssetHub>(PhantomData<(T, AssetHubId)>); +impl> ah_client::SendToAssetHub for XcmToAssetHub { + type AccountId = AccountId; + + fn relay_session_report(session_report: rc_client::SessionReport) { + let message = Xcm(vec![ + Instruction::UnpaidExecution { + weight_limit: WeightLimit::Unlimited, + check_origin: None, + }, + Self::mk_asset_hub_call(RcClientCalls::RelaySessionReport(session_report)), + ]); + if let Err(err) = send_xcm::(AssetHubNextLocation::get(), message) { + log::error!(target: "runtime", "Failed to send relay session report message: {:?}", err); + } + } + + fn relay_new_offence( + session_index: SessionIndex, + offences: Vec>, + ) { + let message = Xcm(vec![ + Instruction::UnpaidExecution { + weight_limit: WeightLimit::Unlimited, + check_origin: None, + }, + Self::mk_asset_hub_call(RcClientCalls::RelayNewOffence(session_index, offences)), + ]); + if let Err(err) = send_xcm::(AssetHubNextLocation::get(), message) { + log::error!(target: "runtime", "Failed to send 
relay offence message: {:?}", err); + } + } +} + +impl> XcmToAssetHub { + fn mk_asset_hub_call( + call: RcClientCalls<::AccountId>, + ) -> Instruction<()> { + Instruction::Transact { + origin_kind: OriginKind::Superuser, + fallback_max_weight: None, + call: AssetHubRuntimePallets::RcClient(call).encode().into(), + } + } +} + +pub struct EnsureAssetHub; +impl frame_support::traits::EnsureOrigin for EnsureAssetHub { + type Success = (); + fn try_origin(o: RuntimeOrigin) -> Result { + match >>::into( + o.clone(), + ) { + Ok(parachains_origin::Origin::Parachain(id)) if id == 1100.into() => Ok(()), + _ => Err(o), + } + } + + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin() -> Result { + Ok(RuntimeOrigin::root()) + } +} + +impl pallet_staking_async_ah_client::Config for Runtime { + type CurrencyBalance = Balance; + type AssetHubOrigin = + frame_support::traits::EitherOfDiverse, EnsureAssetHub>; + type AdminOrigin = EnsureRoot; + type SessionInterface = Self; + type SendToAssetHub = XcmToAssetHub; + type MinimumValidatorSetSize = ConstU32<333>; + type UnixTime = Timestamp; + type PointsPerBlock = ConstU32<20>; + type Fallback = Staking; +} + +parameter_types! { + // phase durations. 1/4 of the last session for each. 
+ pub SignedPhase: u32 = prod_or_fast!( + EPOCH_DURATION_IN_SLOTS / 4, + (1 * MINUTES).min(EpochDuration::get().saturated_into::() / 2) + ); + pub UnsignedPhase: u32 = prod_or_fast!( + EPOCH_DURATION_IN_SLOTS / 4, + (1 * MINUTES).min(EpochDuration::get().saturated_into::() / 2) + ); + + // signed config + pub const SignedMaxSubmissions: u32 = 128; + pub const SignedMaxRefunds: u32 = 128 / 4; + pub const SignedFixedDeposit: Balance = deposit(2, 0); + pub const SignedDepositIncreaseFactor: Percent = Percent::from_percent(10); + pub const SignedDepositByte: Balance = deposit(0, 10) / 1024; + // Each good submission will get 1 WND as reward + pub SignedRewardBase: Balance = 1 * UNITS; + + // 1 hour session, 15 minutes unsigned phase, 4 offchain executions. + pub OffchainRepeat: BlockNumber = UnsignedPhase::get() / 4; + + pub const MaxElectingVoters: u32 = 22_500; + /// We take the top 22500 nominators as electing voters and all of the validators as electable + /// targets. Whilst this is the case, we cannot and shall not increase the size of the + /// validator intentions. + pub ElectionBounds: frame_election_provider_support::bounds::ElectionBounds = + ElectionBoundsBuilder::default().voters_count(MaxElectingVoters::get().into()).build(); + // Maximum winners that can be chosen as active validators + pub const MaxActiveValidators: u32 = 1000; + // One page only, fill the whole page with the `MaxActiveValidators`. + pub const MaxWinnersPerPage: u32 = MaxActiveValidators::get(); + // Unbonded, thus the max backers per winner maps to the max electing voters limit. 
+ pub const MaxBackersPerWinner: u32 = MaxElectingVoters::get(); +} + +frame_election_provider_support::generate_solution_type!( + #[compact] + pub struct NposCompactSolution16::< + VoterIndex = u32, + TargetIndex = u16, + Accuracy = sp_runtime::PerU16, + MaxVoters = MaxElectingVoters, + >(16) +); + +pub struct OnChainSeqPhragmen; +impl onchain::Config for OnChainSeqPhragmen { + type Sort = ConstBool; + type System = Runtime; + type Solver = SequentialPhragmen< + AccountId, + pallet_election_provider_multi_phase::SolutionAccuracyOf, + >; + type DataProvider = Staking; + type WeightInfo = (); + type Bounds = ElectionBounds; + type MaxBackersPerWinner = MaxBackersPerWinner; + type MaxWinnersPerPage = MaxWinnersPerPage; +} + +impl pallet_election_provider_multi_phase::MinerConfig for Runtime { + type AccountId = AccountId; + type MaxLength = OffchainSolutionLengthLimit; + type MaxWeight = OffchainSolutionWeightLimit; + type Solution = NposCompactSolution16; + type MaxVotesPerVoter = < + ::DataProvider + as + frame_election_provider_support::ElectionDataProvider + >::MaxVotesPerVoter; + type MaxBackersPerWinner = MaxBackersPerWinner; + type MaxWinners = MaxWinnersPerPage; + + // The unsigned submissions have to respect the weight of the submit_unsigned call, thus their + // weight estimate function is wired to this call's weight. 
+ fn solution_weight(v: u32, t: u32, a: u32, d: u32) -> Weight { + < + ::WeightInfo + as + pallet_election_provider_multi_phase::WeightInfo + >::submit_unsigned(v, t, a, d) + } +} + +impl pallet_election_provider_multi_phase::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type EstimateCallFee = TransactionPayment; + type SignedPhase = SignedPhase; + type UnsignedPhase = UnsignedPhase; + type SignedMaxSubmissions = SignedMaxSubmissions; + type SignedMaxRefunds = SignedMaxRefunds; + type SignedRewardBase = SignedRewardBase; + type SignedDepositBase = pallet_election_provider_multi_phase::GeometricDepositBase< + Balance, + SignedFixedDeposit, + SignedDepositIncreaseFactor, + >; + type SignedDepositByte = SignedDepositByte; + type SignedDepositWeight = (); + type SignedMaxWeight = + ::MaxWeight; + type MinerConfig = Self; + type SlashHandler = (); // burn slashes + type RewardHandler = (); // rewards are minted from the void + type BetterSignedThreshold = (); + type OffchainRepeat = OffchainRepeat; + type MinerTxPriority = NposSolutionPriority; + type MaxWinners = MaxWinnersPerPage; + type MaxBackersPerWinner = MaxBackersPerWinner; + type DataProvider = Staking; + #[cfg(any(feature = "fast-runtime", feature = "runtime-benchmarks"))] + type Fallback = onchain::OnChainExecution; + #[cfg(not(any(feature = "fast-runtime", feature = "runtime-benchmarks")))] + type Fallback = frame_election_provider_support::NoElection<( + AccountId, + BlockNumber, + Staking, + MaxWinnersPerPage, + MaxBackersPerWinner, + )>; + type GovernanceFallback = onchain::OnChainExecution; + type Solver = SequentialPhragmen< + AccountId, + pallet_election_provider_multi_phase::SolutionAccuracyOf, + (), + >; + type BenchmarkingConfig = polkadot_runtime_common::elections::BenchmarkConfig; + type ForceOrigin = EnsureRoot; + type WeightInfo = (); + type ElectionBounds = ElectionBounds; +} + +parameter_types! 
{ + pub const SessionsPerEra: SessionIndex = 3; + pub const BondingDuration: sp_staking::EraIndex = 3; + pub const SlashDeferDuration: sp_staking::EraIndex = 1; + pub const MaxExposurePageSize: u32 = 64; + pub const MaxNominations: u32 = ::LIMIT as u32; + pub const MaxControllersInDeprecationBatch: u32 = 751; +} + +impl pallet_staking::Config for Runtime { + type OldCurrency = Balances; + type Currency = Balances; + type UnixTime = Timestamp; + type SessionInterface = Self; + type CurrencyBalance = Balance; + type RuntimeHoldReason = RuntimeHoldReason; + type CurrencyToVote = sp_staking::currency_to_vote::U128CurrencyToVote; + type RewardRemainder = (); + type RuntimeEvent = RuntimeEvent; + type Slash = (); + type Reward = (); + type SessionsPerEra = SessionsPerEra; + type BondingDuration = BondingDuration; + type SlashDeferDuration = SlashDeferDuration; + type AdminOrigin = EitherOf, StakingAdmin>; + type EraPayout = (); + type MaxExposurePageSize = MaxExposurePageSize; + type NextNewSession = Session; + type ElectionProvider = ElectionProviderMultiPhase; + type GenesisElectionProvider = onchain::OnChainExecution; + type VoterList = VoterList; + type TargetList = pallet_staking::UseValidatorsMap; + type MaxValidatorSet = MaxActiveValidators; + type NominationsQuota = pallet_staking::FixedNominationsQuota<{ MaxNominations::get() }>; + type MaxUnlockingChunks = frame_support::traits::ConstU32<32>; + type HistoryDepth = frame_support::traits::ConstU32<84>; + type MaxControllersInDeprecationBatch = MaxControllersInDeprecationBatch; + type BenchmarkingConfig = polkadot_runtime_common::StakingBenchmarkingConfig; + type EventListeners = (); + type WeightInfo = (); + type Filter = Nothing; +} + +const THRESHOLDS: [VoteWeight; 9] = [10, 20, 30, 40, 50, 60, 1_000, 2_000, 10_000]; + +parameter_types! 
{ + pub const BagThresholds: &'static [sp_npos_elections::VoteWeight] = &THRESHOLDS; +} + +type VoterBagsListInstance = pallet_bags_list::Instance1; +impl pallet_bags_list::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type ScoreProvider = Staking; + type WeightInfo = (); + type BagThresholds = BagThresholds; + type Score = sp_npos_elections::VoteWeight; +} + +parameter_types! { + pub const SpendPeriod: BlockNumber = 6 * DAYS; + pub const Burn: Permill = Permill::from_perthousand(2); + pub const TreasuryPalletId: PalletId = PalletId(*b"py/trsry"); + pub const PayoutSpendPeriod: BlockNumber = 30 * DAYS; + // The asset's interior location for the paying account. This is the Treasury + // pallet instance (which sits at index 37). + pub TreasuryInteriorLocation: InteriorLocation = PalletInstance(37).into(); + + pub const TipCountdown: BlockNumber = 1 * DAYS; + pub const TipFindersFee: Percent = Percent::from_percent(20); + pub const TipReportDepositBase: Balance = 100 * CENTS; + pub const DataDepositPerByte: Balance = 1 * CENTS; + pub const MaxApprovals: u32 = 100; + pub const MaxAuthorities: u32 = 100_000; + pub const MaxKeys: u32 = 10_000; + pub const MaxPeerInHeartbeats: u32 = 10_000; + pub const MaxBalance: Balance = Balance::max_value(); +} + +impl pallet_treasury::Config for Runtime { + type PalletId = TreasuryPalletId; + type Currency = Balances; + type RejectOrigin = EitherOfDiverse, Treasurer>; + type RuntimeEvent = RuntimeEvent; + type SpendPeriod = SpendPeriod; + type Burn = Burn; + type BurnDestination = (); + type MaxApprovals = MaxApprovals; + type WeightInfo = weights::pallet_treasury::WeightInfo; + type SpendFunds = (); + type SpendOrigin = TreasurySpender; + type AssetKind = VersionedLocatableAsset; + type Beneficiary = VersionedLocation; + type BeneficiaryLookup = IdentityLookup; + type Paymaster = PayOverXcm< + TreasuryInteriorLocation, + crate::xcm_config::XcmRouter, + crate::XcmPallet, + ConstU32<{ 6 * HOURS }>, + Self::Beneficiary, + 
Self::AssetKind, + LocatableAssetConverter, + VersionedLocationConverter, + >; + type BalanceConverter = UnityOrOuterConversion< + ContainsParts< + FromContains< + xcm_builder::IsChildSystemParachain, + xcm_builder::IsParentsOnly>, + >, + >, + AssetRate, + >; + type PayoutPeriod = PayoutSpendPeriod; + type BlockNumberProvider = System; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = polkadot_runtime_common::impls::benchmarks::TreasuryArguments; +} + +impl pallet_offences::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type IdentificationTuple = session_historical::IdentificationTuple; + type OnOffenceHandler = StakingAsyncAhClient; +} + +impl pallet_authority_discovery::Config for Runtime { + type MaxAuthorities = MaxAuthorities; +} + +parameter_types! { + pub const NposSolutionPriority: TransactionPriority = TransactionPriority::max_value() / 2; +} + +parameter_types! { + pub const MaxSetIdSessionEntries: u32 = 1024; +} + +impl pallet_grandpa::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + + type WeightInfo = (); + type MaxAuthorities = MaxAuthorities; + type MaxNominators = ::MaxNominators; + type MaxSetIdSessionEntries = MaxSetIdSessionEntries; + + type KeyOwnerProof = sp_session::MembershipProof; + + type EquivocationReportSystem = + pallet_grandpa::EquivocationReportSystem; +} + +impl frame_system::offchain::SigningTypes for Runtime { + type Public = ::Signer; + type Signature = Signature; +} + +impl frame_system::offchain::CreateTransactionBase for Runtime +where + RuntimeCall: From, +{ + type RuntimeCall = RuntimeCall; + type Extrinsic = UncheckedExtrinsic; +} + +impl frame_system::offchain::CreateTransaction for Runtime +where + RuntimeCall: From, +{ + type Extension = TxExtension; + + fn create_transaction(call: RuntimeCall, extension: TxExtension) -> UncheckedExtrinsic { + UncheckedExtrinsic::new_transaction(call, extension) + } +} + +/// Submits a transaction with the node's public and signature type. 
Adheres to the signed extension +/// format of the chain. +impl frame_system::offchain::CreateSignedTransaction for Runtime +where + RuntimeCall: From, +{ + fn create_signed_transaction< + C: frame_system::offchain::AppCrypto, + >( + call: RuntimeCall, + public: ::Signer, + account: AccountId, + nonce: ::Nonce, + ) -> Option { + use sp_runtime::traits::StaticLookup; + // take the biggest period possible. + let period = + BlockHashCount::get().checked_next_power_of_two().map(|c| c / 2).unwrap_or(2) as u64; + + let current_block = System::block_number() + .saturated_into::() + // The `System::block_number` is initialized with `n+1`, + // so the actual block number is `n`. + .saturating_sub(1); + let tip = 0; + let tx_ext: TxExtension = ( + frame_system::CheckNonZeroSender::::new(), + frame_system::CheckSpecVersion::::new(), + frame_system::CheckTxVersion::::new(), + frame_system::CheckGenesis::::new(), + frame_system::CheckMortality::::from(generic::Era::mortal( + period, + current_block, + )), + frame_system::CheckNonce::::from(nonce), + frame_system::CheckWeight::::new(), + pallet_transaction_payment::ChargeTransactionPayment::::from(tip), + frame_metadata_hash_extension::CheckMetadataHash::::new(true), + frame_system::WeightReclaim::::new(), + ) + .into(); + let raw_payload = SignedPayload::new(call, tx_ext) + .map_err(|e| { + log::warn!("Unable to create signed payload: {:?}", e); + }) + .ok()?; + let signature = raw_payload.using_encoded(|payload| C::sign(payload, public))?; + let (call, tx_ext, _) = raw_payload.deconstruct(); + let address = ::Lookup::unlookup(account); + let transaction = UncheckedExtrinsic::new_signed(call, address, signature, tx_ext); + Some(transaction) + } +} + +impl frame_system::offchain::CreateInherent for Runtime +where + RuntimeCall: From, +{ + fn create_inherent(call: RuntimeCall) -> UncheckedExtrinsic { + UncheckedExtrinsic::new_bare(call) + } +} + +parameter_types! 
{ + // Minimum 100 bytes/KSM deposited (1 CENT/byte) + pub const BasicDeposit: Balance = 1000 * CENTS; // 258 bytes on-chain + pub const ByteDeposit: Balance = deposit(0, 1); + pub const UsernameDeposit: Balance = deposit(0, 32); + pub const SubAccountDeposit: Balance = 200 * CENTS; // 53 bytes on-chain + pub const MaxSubAccounts: u32 = 100; + pub const MaxAdditionalFields: u32 = 100; + pub const MaxRegistrars: u32 = 20; +} + +impl pallet_identity::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type Slashed = (); + type BasicDeposit = BasicDeposit; + type ByteDeposit = ByteDeposit; + type UsernameDeposit = UsernameDeposit; + type SubAccountDeposit = SubAccountDeposit; + type MaxSubAccounts = MaxSubAccounts; + type IdentityInformation = IdentityInfo; + type MaxRegistrars = MaxRegistrars; + type ForceOrigin = EitherOf, GeneralAdmin>; + type RegistrarOrigin = EitherOf, GeneralAdmin>; + type OffchainSignature = Signature; + type SigningPublicKey = ::Signer; + type UsernameAuthorityOrigin = EnsureRoot; + type PendingUsernameExpiration = ConstU32<{ 7 * DAYS }>; + type UsernameGracePeriod = ConstU32<{ 30 * DAYS }>; + type MaxSuffixLength = ConstU32<7>; + type MaxUsernameLength = ConstU32<32>; + type WeightInfo = weights::pallet_identity::WeightInfo; +} + +impl pallet_utility::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; + type PalletsOrigin = OriginCaller; + type WeightInfo = weights::pallet_utility::WeightInfo; +} + +parameter_types! { + // One storage item; key size is 32; value is size 4+4+16+32 bytes = 56 bytes. + pub const DepositBase: Balance = deposit(1, 88); + // Additional storage item size of 32 bytes. 
+ pub const DepositFactor: Balance = deposit(0, 32); + pub const MaxSignatories: u32 = 100; +} + +impl pallet_multisig::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; + type Currency = Balances; + type DepositBase = DepositBase; + type DepositFactor = DepositFactor; + type MaxSignatories = MaxSignatories; + type WeightInfo = weights::pallet_multisig::WeightInfo; + type BlockNumberProvider = frame_system::Pallet; +} + +parameter_types! { + pub const ConfigDepositBase: Balance = 500 * CENTS; + pub const FriendDepositFactor: Balance = 50 * CENTS; + pub const MaxFriends: u16 = 9; + pub const RecoveryDeposit: Balance = 500 * CENTS; +} + +impl pallet_recovery::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = (); + type RuntimeCall = RuntimeCall; + type BlockNumberProvider = System; + type Currency = Balances; + type ConfigDepositBase = ConfigDepositBase; + type FriendDepositFactor = FriendDepositFactor; + type MaxFriends = MaxFriends; + type RecoveryDeposit = RecoveryDeposit; +} + +parameter_types! { + pub const MinVestedTransfer: Balance = 100 * CENTS; + pub UnvestedFundsAllowedWithdrawReasons: WithdrawReasons = + WithdrawReasons::except(WithdrawReasons::TRANSFER | WithdrawReasons::RESERVE); +} + +impl pallet_vesting::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type BlockNumberToBalance = ConvertInto; + type MinVestedTransfer = MinVestedTransfer; + type WeightInfo = weights::pallet_vesting::WeightInfo; + type UnvestedFundsAllowedWithdrawReasons = UnvestedFundsAllowedWithdrawReasons; + type BlockNumberProvider = System; + const MAX_VESTING_SCHEDULES: u32 = 28; +} + +impl pallet_sudo::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; + type WeightInfo = weights::pallet_sudo::WeightInfo; +} + +parameter_types! { + // One storage item; key size 32, value size 8; . 
+ pub const ProxyDepositBase: Balance = deposit(1, 8); + // Additional storage item size of 33 bytes. + pub const ProxyDepositFactor: Balance = deposit(0, 33); + pub const MaxProxies: u16 = 32; + pub const AnnouncementDepositBase: Balance = deposit(1, 8); + pub const AnnouncementDepositFactor: Balance = deposit(0, 66); + pub const MaxPending: u16 = 32; +} + +/// The type used to represent the kinds of proxying allowed. +#[derive( + Copy, + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + Encode, + Decode, + DecodeWithMemTracking, + RuntimeDebug, + MaxEncodedLen, + TypeInfo, +)] +pub enum ProxyType { + Any, + NonTransfer, + Governance, + Staking, + SudoBalances, + IdentityJudgement, + CancelProxy, + Auction, + NominationPools, + ParaRegistration, +} +impl Default for ProxyType { + fn default() -> Self { + Self::Any + } +} +impl InstanceFilter for ProxyType { + fn filter(&self, c: &RuntimeCall) -> bool { + match self { + ProxyType::Any => true, + ProxyType::NonTransfer => matches!( + c, + RuntimeCall::System(..) | + RuntimeCall::Babe(..) | + RuntimeCall::Timestamp(..) | + RuntimeCall::Indices(pallet_indices::Call::claim{..}) | + RuntimeCall::Indices(pallet_indices::Call::free{..}) | + RuntimeCall::Indices(pallet_indices::Call::freeze{..}) | + // Specifically omitting Indices `transfer`, `force_transfer` + // Specifically omitting the entire Balances pallet + RuntimeCall::Session(..) | + RuntimeCall::Grandpa(..) | + RuntimeCall::Utility(..) | + RuntimeCall::Identity(..) | + RuntimeCall::ConvictionVoting(..) | + RuntimeCall::Referenda(..) | + RuntimeCall::Whitelist(..) 
| + RuntimeCall::Recovery(pallet_recovery::Call::as_recovered{..}) | + RuntimeCall::Recovery(pallet_recovery::Call::vouch_recovery{..}) | + RuntimeCall::Recovery(pallet_recovery::Call::claim_recovery{..}) | + RuntimeCall::Recovery(pallet_recovery::Call::close_recovery{..}) | + RuntimeCall::Recovery(pallet_recovery::Call::remove_recovery{..}) | + RuntimeCall::Recovery(pallet_recovery::Call::cancel_recovered{..}) | + // Specifically omitting Recovery `create_recovery`, `initiate_recovery` + RuntimeCall::Vesting(pallet_vesting::Call::vest{..}) | + RuntimeCall::Vesting(pallet_vesting::Call::vest_other{..}) | + // Specifically omitting Vesting `vested_transfer`, and `force_vested_transfer` + RuntimeCall::Scheduler(..) | + // Specifically omitting Sudo pallet + RuntimeCall::Proxy(..) | + RuntimeCall::Multisig(..) | + RuntimeCall::Registrar(paras_registrar::Call::register{..}) | + RuntimeCall::Registrar(paras_registrar::Call::deregister{..}) | + // Specifically omitting Registrar `swap` + RuntimeCall::Registrar(paras_registrar::Call::reserve{..}) | + RuntimeCall::Crowdloan(..) | + RuntimeCall::Slots(..) | + RuntimeCall::Auctions(..) + ), + ProxyType::Staking => { + matches!(c, RuntimeCall::Session(..) | RuntimeCall::Utility(..)) + }, + ProxyType::NominationPools => { + matches!(c,| RuntimeCall::Utility(..)) + }, + ProxyType::SudoBalances => match c { + RuntimeCall::Sudo(pallet_sudo::Call::sudo { call: ref x }) => { + matches!(x.as_ref(), &RuntimeCall::Balances(..)) + }, + RuntimeCall::Utility(..) => true, + _ => false, + }, + ProxyType::Governance => matches!( + c, + // OpenGov calls + RuntimeCall::ConvictionVoting(..) | + RuntimeCall::Referenda(..) | + RuntimeCall::Whitelist(..) + ), + ProxyType::IdentityJudgement => matches!( + c, + RuntimeCall::Identity(pallet_identity::Call::provide_judgement { .. }) | + RuntimeCall::Utility(..) + ), + ProxyType::CancelProxy => { + matches!(c, RuntimeCall::Proxy(pallet_proxy::Call::reject_announcement { .. 
})) + }, + ProxyType::Auction => matches!( + c, + RuntimeCall::Auctions(..) | + RuntimeCall::Crowdloan(..) | + RuntimeCall::Registrar(..) | + RuntimeCall::Slots(..) + ), + ProxyType::ParaRegistration => matches!( + c, + RuntimeCall::Registrar(paras_registrar::Call::reserve { .. }) | + RuntimeCall::Registrar(paras_registrar::Call::register { .. }) | + RuntimeCall::Utility(pallet_utility::Call::batch { .. }) | + RuntimeCall::Utility(pallet_utility::Call::batch_all { .. }) | + RuntimeCall::Utility(pallet_utility::Call::force_batch { .. }) | + RuntimeCall::Proxy(pallet_proxy::Call::remove_proxy { .. }) + ), + } + } + fn is_superset(&self, o: &Self) -> bool { + match (self, o) { + (x, y) if x == y => true, + (ProxyType::Any, _) => true, + (_, ProxyType::Any) => false, + (ProxyType::NonTransfer, _) => true, + _ => false, + } + } +} + +impl pallet_proxy::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; + type Currency = Balances; + type ProxyType = ProxyType; + type ProxyDepositBase = ProxyDepositBase; + type ProxyDepositFactor = ProxyDepositFactor; + type MaxProxies = MaxProxies; + type WeightInfo = weights::pallet_proxy::WeightInfo; + type MaxPending = MaxPending; + type CallHasher = BlakeTwo256; + type AnnouncementDepositBase = AnnouncementDepositBase; + type AnnouncementDepositFactor = AnnouncementDepositFactor; + type BlockNumberProvider = frame_system::Pallet; +} + +impl parachains_origin::Config for Runtime {} + +impl parachains_configuration::Config for Runtime { + type WeightInfo = weights::polkadot_runtime_parachains_configuration::WeightInfo; +} + +impl parachains_shared::Config for Runtime { + type DisabledValidators = Session; +} + +impl parachains_session_info::Config for Runtime { + type ValidatorSet = Historical; +} + +impl parachains_inclusion::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type DisputesHandler = ParasDisputes; + type RewardValidators = + 
parachains_reward_points::RewardValidatorsWithEraPoints; + type MessageQueue = MessageQueue; + type WeightInfo = weights::polkadot_runtime_parachains_inclusion::WeightInfo; +} + +parameter_types! { + pub const ParasUnsignedPriority: TransactionPriority = TransactionPriority::max_value(); +} + +impl parachains_paras::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = weights::polkadot_runtime_parachains_paras::WeightInfo; + type UnsignedPriority = ParasUnsignedPriority; + type QueueFootprinter = ParaInclusion; + type NextSessionRotation = Babe; + type OnNewHead = (); + type AssignCoretime = CoretimeAssignmentProvider; +} + +parameter_types! { + /// Amount of weight that can be spent per block to service messages. + /// + /// # WARNING + /// + /// This is not a good value for para-chains since the `Scheduler` already uses up to 80% block weight. + pub MessageQueueServiceWeight: Weight = Perbill::from_percent(20) * BlockWeights::get().max_block; + pub const MessageQueueHeapSize: u32 = 128 * 1024; + pub const MessageQueueMaxStale: u32 = 48; +} + +/// Message processor to handle any messages that were enqueued into the `MessageQueue` pallet. 
+pub struct MessageProcessor; +impl ProcessMessage for MessageProcessor { + type Origin = AggregateMessageOrigin; + + fn process_message( + message: &[u8], + origin: Self::Origin, + meter: &mut WeightMeter, + id: &mut [u8; 32], + ) -> Result { + let para = match origin { + AggregateMessageOrigin::Ump(UmpQueueId::Para(para)) => para, + }; + xcm_builder::ProcessXcmMessage::< + Junction, + xcm_executor::XcmExecutor, + RuntimeCall, + >::process_message(message, Junction::Parachain(para.into()), meter, id) + } +} + +impl pallet_message_queue::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Size = u32; + type HeapSize = MessageQueueHeapSize; + type MaxStale = MessageQueueMaxStale; + type ServiceWeight = MessageQueueServiceWeight; + type IdleMaxServiceWeight = MessageQueueServiceWeight; + #[cfg(not(feature = "runtime-benchmarks"))] + type MessageProcessor = MessageProcessor; + #[cfg(feature = "runtime-benchmarks")] + type MessageProcessor = + pallet_message_queue::mock_helpers::NoopMessageProcessor; + type QueueChangeHandler = ParaInclusion; + type QueuePausedQuery = (); + type WeightInfo = weights::pallet_message_queue::WeightInfo; +} + +impl parachains_dmp::Config for Runtime {} + +parameter_types! 
{ + pub const HrmpChannelSizeAndCapacityWithSystemRatio: Percent = Percent::from_percent(100); +} + +impl parachains_hrmp::Config for Runtime { + type RuntimeOrigin = RuntimeOrigin; + type RuntimeEvent = RuntimeEvent; + type ChannelManager = EnsureRoot; + type Currency = Balances; + type DefaultChannelSizeAndCapacityWithSystem = ActiveConfigHrmpChannelSizeAndCapacityRatio< + Runtime, + HrmpChannelSizeAndCapacityWithSystemRatio, + >; + type VersionWrapper = crate::XcmPallet; + type WeightInfo = weights::polkadot_runtime_parachains_hrmp::WeightInfo; +} + +impl parachains_paras_inherent::Config for Runtime { + type WeightInfo = weights::polkadot_runtime_parachains_paras_inherent::WeightInfo; +} + +impl parachains_scheduler::Config for Runtime { + // If you change this, make sure the `Assignment` type of the new provider is binary compatible, + // otherwise provide a migration. + type AssignmentProvider = CoretimeAssignmentProvider; +} + +parameter_types! { + pub const BrokerId: u32 = BROKER_ID; + pub const BrokerPalletId: PalletId = PalletId(*b"py/broke"); + pub const AssetHubId: u32 = ASSET_HUB_ID; // TODO: replace with ASSET_HUB_NEXT_ID + pub MaxXcmTransactWeight: Weight = Weight::from_parts(200_000_000, 20_000); +} + +pub struct BrokerPot; +impl Get for AssetHubLocation { + fn get() -> InteriorLocation { + Junction::AccountId32 { network: None, id: BrokerPalletId::get().into_account_truncating() } + .into() + } +} + +impl coretime::Config for Runtime { + type RuntimeOrigin = RuntimeOrigin; + type RuntimeEvent = RuntimeEvent; + type BrokerId = BrokerId; + type BrokerPotLocation = AssetHubLocation; + type WeightInfo = weights::polkadot_runtime_parachains_coretime::WeightInfo; + type SendXcm = crate::xcm_config::XcmRouter; + type AssetTransactor = crate::xcm_config::LocalAssetTransactor; + type AccountToLocation = xcm_builder::AliasesIntoAccountId32< + xcm_config::ThisNetwork, + ::AccountId, + >; + type MaxXcmTransactWeight = MaxXcmTransactWeight; +} + 
+parameter_types! { + pub const OnDemandTrafficDefaultValue: FixedU128 = FixedU128::from_u32(1); + // Keep 2 timeslices worth of revenue information. + pub const MaxHistoricalRevenue: BlockNumber = 2 * TIMESLICE_PERIOD; + pub const OnDemandPalletId: PalletId = PalletId(*b"py/ondmd"); +} + +impl parachains_on_demand::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type TrafficDefaultValue = OnDemandTrafficDefaultValue; + type WeightInfo = weights::polkadot_runtime_parachains_on_demand::WeightInfo; + type MaxHistoricalRevenue = MaxHistoricalRevenue; + type PalletId = OnDemandPalletId; +} + +impl parachains_assigner_coretime::Config for Runtime {} + +impl parachains_initializer::Config for Runtime { + type Randomness = pallet_babe::RandomnessFromOneEpochAgo; + type ForceOrigin = EnsureRoot; + type WeightInfo = weights::polkadot_runtime_parachains_initializer::WeightInfo; + type CoretimeOnNewSession = Coretime; +} + +impl paras_sudo_wrapper::Config for Runtime {} + +parameter_types! 
{ + pub const PermanentSlotLeasePeriodLength: u32 = 26; + pub const TemporarySlotLeasePeriodLength: u32 = 1; + pub const MaxTemporarySlotPerLeasePeriod: u32 = 5; +} + +impl assigned_slots::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type AssignSlotOrigin = EnsureRoot; + type Leaser = Slots; + type PermanentSlotLeasePeriodLength = PermanentSlotLeasePeriodLength; + type TemporarySlotLeasePeriodLength = TemporarySlotLeasePeriodLength; + type MaxTemporarySlotPerLeasePeriod = MaxTemporarySlotPerLeasePeriod; + type WeightInfo = weights::polkadot_runtime_common_assigned_slots::WeightInfo; +} + +impl parachains_disputes::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type RewardValidators = + parachains_reward_points::RewardValidatorsWithEraPoints; + type SlashingHandler = parachains_slashing::SlashValidatorsForDisputes; + type WeightInfo = weights::polkadot_runtime_parachains_disputes::WeightInfo; +} + +impl parachains_slashing::Config for Runtime { + type KeyOwnerProofSystem = Historical; + type KeyOwnerProof = + >::Proof; + type KeyOwnerIdentification = >::IdentificationTuple; + type HandleReports = parachains_slashing::SlashingReportHandler< + Self::KeyOwnerIdentification, + Offences, + ReportLongevity, + >; + type WeightInfo = weights::polkadot_runtime_parachains_disputes_slashing::WeightInfo; + type BenchmarkingConfig = parachains_slashing::BenchConfig<300>; +} + +parameter_types! { + pub const ParaDeposit: Balance = 2000 * CENTS; + pub const RegistrarDataDepositPerByte: Balance = deposit(0, 1); +} + +impl paras_registrar::Config for Runtime { + type RuntimeOrigin = RuntimeOrigin; + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type OnSwap = (Crowdloan, Slots, SwapLeases); + type ParaDeposit = ParaDeposit; + type DataDepositPerByte = RegistrarDataDepositPerByte; + type WeightInfo = weights::polkadot_runtime_common_paras_registrar::WeightInfo; +} + +parameter_types! 
{ + pub const LeasePeriod: BlockNumber = 28 * DAYS; +} + +impl slots::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type Registrar = Registrar; + type LeasePeriod = LeasePeriod; + type LeaseOffset = (); + type ForceOrigin = EitherOf, LeaseAdmin>; + type WeightInfo = weights::polkadot_runtime_common_slots::WeightInfo; +} + +parameter_types! { + pub const CrowdloanId: PalletId = PalletId(*b"py/cfund"); + pub const SubmissionDeposit: Balance = 100 * 100 * CENTS; + pub const MinContribution: Balance = 100 * CENTS; + pub const RemoveKeysLimit: u32 = 500; + // Allow 32 bytes for an additional memo to a crowdloan. + pub const MaxMemoLength: u8 = 32; +} + +impl crowdloan::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type PalletId = CrowdloanId; + type SubmissionDeposit = SubmissionDeposit; + type MinContribution = MinContribution; + type RemoveKeysLimit = RemoveKeysLimit; + type Registrar = Registrar; + type Auctioneer = Auctions; + type MaxMemoLength = MaxMemoLength; + type WeightInfo = weights::polkadot_runtime_common_crowdloan::WeightInfo; +} + +parameter_types! { + // The average auction is 7 days long, so this will be 70% for ending period. 
+ // 5 Days = 72000 Blocks @ 6 sec per block + pub const EndingPeriod: BlockNumber = 5 * DAYS; + // ~ 1000 samples per day -> ~ 20 blocks per sample -> 2 minute samples + pub const SampleLength: BlockNumber = 2 * MINUTES; +} + +impl auctions::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Leaser = Slots; + type Registrar = Registrar; + type EndingPeriod = EndingPeriod; + type SampleLength = SampleLength; + type Randomness = pallet_babe::RandomnessFromOneEpochAgo; + type InitiateOrigin = EitherOf, AuctionAdmin>; + type WeightInfo = weights::polkadot_runtime_common_auctions::WeightInfo; +} + +impl identity_migrator::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Reaper = EnsureSigned; + type ReapIdentityHandler = ToParachainIdentityReaper; + type WeightInfo = weights::polkadot_runtime_common_identity_migrator::WeightInfo; +} + +impl pallet_root_testing::Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} + +parameter_types! { + pub MbmServiceWeight: Weight = Perbill::from_percent(80) * BlockWeights::get().max_block; +} + +impl pallet_migrations::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + #[cfg(not(feature = "runtime-benchmarks"))] + type Migrations = pallet_identity::migration::v2::LazyMigrationV1ToV2; + // Benchmarks need mocked migrations to guarantee that they succeed. + #[cfg(feature = "runtime-benchmarks")] + type Migrations = pallet_migrations::mock_helpers::MockedMigrations; + type CursorMaxLen = ConstU32<65_536>; + type IdentifierMaxLen = ConstU32<256>; + type MigrationStatusHandler = (); + type FailedMigrationHandler = frame_support::migrations::FreezeChainOnFailedMigration; + type MaxServiceWeight = MbmServiceWeight; + type WeightInfo = weights::pallet_migrations::WeightInfo; +} + +parameter_types! { + // The deposit configuration for the singed migration. 
Specially if you want to allow any signed account to do the migration (see `SignedFilter`, these deposits should be high) + pub const MigrationSignedDepositPerItem: Balance = 1 * CENTS; + pub const MigrationSignedDepositBase: Balance = 20 * CENTS * 100; + pub const MigrationMaxKeyLen: u32 = 512; +} + +impl pallet_asset_rate::Config for Runtime { + type WeightInfo = weights::pallet_asset_rate::WeightInfo; + type RuntimeEvent = RuntimeEvent; + type CreateOrigin = EnsureRoot; + type RemoveOrigin = EnsureRoot; + type UpdateOrigin = EnsureRoot; + type Currency = Balances; + type AssetKind = ::AssetKind; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = polkadot_runtime_common::impls::benchmarks::AssetRateArguments; +} + +// Notify `coretime` pallet when a lease swap occurs +pub struct SwapLeases; +impl OnSwap for SwapLeases { + fn on_swap(one: ParaId, other: ParaId) { + coretime::Pallet::::on_legacy_lease_swap(one, other); + } +} + +#[frame_support::runtime(legacy_ordering)] +mod runtime { + #[runtime::runtime] + #[runtime::derive( + RuntimeCall, + RuntimeEvent, + RuntimeError, + RuntimeOrigin, + RuntimeFreezeReason, + RuntimeHoldReason, + RuntimeSlashReason, + RuntimeLockId, + RuntimeTask, + RuntimeViewFunction + )] + pub struct Runtime; + + // Basic stuff; balances is uncallable initially. + #[runtime::pallet_index(0)] + pub type System = frame_system; + + // Babe must be before session. + #[runtime::pallet_index(1)] + pub type Babe = pallet_babe; + + #[runtime::pallet_index(2)] + pub type Timestamp = pallet_timestamp; + #[runtime::pallet_index(3)] + pub type Indices = pallet_indices; + #[runtime::pallet_index(4)] + pub type Balances = pallet_balances; + #[runtime::pallet_index(26)] + pub type TransactionPayment = pallet_transaction_payment; + + // Consensus support. + // Authorship must be before session in order to note author in the correct session and era. 
+ #[runtime::pallet_index(5)] + pub type Authorship = pallet_authorship; + #[runtime::pallet_index(6)] + pub type Staking = pallet_staking; + #[runtime::pallet_index(7)] + pub type Offences = pallet_offences; + #[runtime::pallet_index(27)] + pub type Historical = session_historical; + #[runtime::pallet_index(70)] + pub type Parameters = pallet_parameters; + + #[runtime::pallet_index(8)] + pub type Session = pallet_session; + #[runtime::pallet_index(10)] + pub type Grandpa = pallet_grandpa; + #[runtime::pallet_index(12)] + pub type AuthorityDiscovery = pallet_authority_discovery; + + // Utility module. + #[runtime::pallet_index(16)] + pub type Utility = pallet_utility; + + // Less simple identity module. + #[runtime::pallet_index(17)] + pub type Identity = pallet_identity; + + // Social recovery module. + #[runtime::pallet_index(18)] + pub type Recovery = pallet_recovery; + + // Vesting. Usable initially, but removed once all vesting is finished. + #[runtime::pallet_index(19)] + pub type Vesting = pallet_vesting; + + // System scheduler. + #[runtime::pallet_index(20)] + pub type Scheduler = pallet_scheduler; + + // Preimage registrar. + #[runtime::pallet_index(28)] + pub type Preimage = pallet_preimage; + + // Sudo. + #[runtime::pallet_index(21)] + pub type Sudo = pallet_sudo; + + // Proxy module. Late addition. + #[runtime::pallet_index(22)] + pub type Proxy = pallet_proxy; + + // Multisig module. Late addition. + #[runtime::pallet_index(23)] + pub type Multisig = pallet_multisig; + + // Election pallet. Only works with staking, but placed here to maintain indices. 
+ #[runtime::pallet_index(24)] + pub type ElectionProviderMultiPhase = pallet_election_provider_multi_phase; + + #[runtime::pallet_index(25)] + pub type VoterList = pallet_bags_list; + + // OpenGov + #[runtime::pallet_index(31)] + pub type ConvictionVoting = pallet_conviction_voting; + #[runtime::pallet_index(32)] + pub type Referenda = pallet_referenda; + #[runtime::pallet_index(35)] + pub type Origins = pallet_custom_origins; + #[runtime::pallet_index(36)] + pub type Whitelist = pallet_whitelist; + + // Treasury + #[runtime::pallet_index(37)] + pub type Treasury = pallet_treasury; + + // Parachains pallets. Start indices at 40 to leave room. + #[runtime::pallet_index(41)] + pub type ParachainsOrigin = parachains_origin; + #[runtime::pallet_index(42)] + pub type Configuration = parachains_configuration; + #[runtime::pallet_index(43)] + pub type ParasShared = parachains_shared; + #[runtime::pallet_index(44)] + pub type ParaInclusion = parachains_inclusion; + #[runtime::pallet_index(45)] + pub type ParaInherent = parachains_paras_inherent; + #[runtime::pallet_index(46)] + pub type ParaScheduler = parachains_scheduler; + #[runtime::pallet_index(47)] + pub type Paras = parachains_paras; + #[runtime::pallet_index(48)] + pub type Initializer = parachains_initializer; + #[runtime::pallet_index(49)] + pub type Dmp = parachains_dmp; + // RIP Ump 50 + #[runtime::pallet_index(51)] + pub type Hrmp = parachains_hrmp; + #[runtime::pallet_index(52)] + pub type ParaSessionInfo = parachains_session_info; + #[runtime::pallet_index(53)] + pub type ParasDisputes = parachains_disputes; + #[runtime::pallet_index(54)] + pub type ParasSlashing = parachains_slashing; + #[runtime::pallet_index(56)] + pub type OnDemandAssignmentProvider = parachains_on_demand; + #[runtime::pallet_index(57)] + pub type CoretimeAssignmentProvider = parachains_assigner_coretime; + + // Parachain Onboarding Pallets. Start indices at 60 to leave room. 
+ #[runtime::pallet_index(60)] + pub type Registrar = paras_registrar; + #[runtime::pallet_index(61)] + pub type Slots = slots; + #[runtime::pallet_index(62)] + pub type ParasSudoWrapper = paras_sudo_wrapper; + #[runtime::pallet_index(63)] + pub type Auctions = auctions; + #[runtime::pallet_index(64)] + pub type Crowdloan = crowdloan; + #[runtime::pallet_index(65)] + pub type AssignedSlots = assigned_slots; + #[runtime::pallet_index(66)] + pub type Coretime = coretime; + + #[runtime::pallet_index(67)] + pub type StakingAsyncAhClient = pallet_staking_async_ah_client; + + // Migrations pallet + #[runtime::pallet_index(98)] + pub type MultiBlockMigrations = pallet_migrations; + + // Pallet for sending XCM. + #[runtime::pallet_index(99)] + pub type XcmPallet = pallet_xcm; + + // Generalized message queue + #[runtime::pallet_index(100)] + pub type MessageQueue = pallet_message_queue; + + // Asset rate. + #[runtime::pallet_index(101)] + pub type AssetRate = pallet_asset_rate; + + // Root testing pallet. + #[runtime::pallet_index(102)] + pub type RootTesting = pallet_root_testing; + + // BEEFY Bridges support. + #[runtime::pallet_index(200)] + pub type Beefy = pallet_beefy; + // MMR leaf construction must be after session in order to have a leaf's next_auth_set + // refer to block. See issue polkadot-fellows/runtimes#160 for details. + #[runtime::pallet_index(201)] + pub type Mmr = pallet_mmr; + #[runtime::pallet_index(202)] + pub type BeefyMmrLeaf = pallet_beefy_mmr; + + // Pallet for migrating Identity to a parachain. To be removed post-migration. + #[runtime::pallet_index(248)] + pub type IdentityMigrator = identity_migrator; +} + +/// The address format for describing accounts. +pub type Address = sp_runtime::MultiAddress; +/// Block header type as expected by this runtime. +pub type Header = generic::Header; +/// Block type as expected by this runtime. 
+pub type Block = generic::Block; +/// A Block signed with a Justification +pub type SignedBlock = generic::SignedBlock; +/// `BlockId` type as expected by this runtime. +pub type BlockId = generic::BlockId; +/// The extension to the basic transaction logic. +pub type TxExtension = ( + frame_system::CheckNonZeroSender, + frame_system::CheckSpecVersion, + frame_system::CheckTxVersion, + frame_system::CheckGenesis, + frame_system::CheckMortality, + frame_system::CheckNonce, + frame_system::CheckWeight, + pallet_transaction_payment::ChargeTransactionPayment, + frame_metadata_hash_extension::CheckMetadataHash, + frame_system::WeightReclaim, +); + +parameter_types! { + /// Bounding number of agent pot accounts to be migrated in a single block. + pub const MaxAgentsToMigrate: u32 = 300; +} + +/// All migrations that will run on the next runtime upgrade. +/// +/// This contains the combined migrations of the last 10 releases. It allows to skip runtime +/// upgrades in case governance decides to do so. THE ORDER IS IMPORTANT. +pub type Migrations = migrations::Unreleased; + +/// The runtime migrations per release. +#[allow(deprecated, missing_docs)] +pub mod migrations { + use super::*; + + /// Unreleased migrations. Add new ones here: + pub type Unreleased = ( + parachains_shared::migration::MigrateToV1, + parachains_scheduler::migration::MigrateV2ToV3, + // permanent + pallet_xcm::migration::MigrateToLatestXcmVersion, + ); +} + +/// Unchecked extrinsic type as expected by this runtime. +pub type UncheckedExtrinsic = + generic::UncheckedExtrinsic; +/// Unchecked signature payload type as expected by this runtime. +pub type UncheckedSignaturePayload = + generic::UncheckedSignaturePayload; + +/// Executive: handles dispatch to the various modules. +pub type Executive = frame_executive::Executive< + Runtime, + Block, + frame_system::ChainContext, + Runtime, + AllPalletsWithSystem, + Migrations, +>; +/// The payload being signed in transactions. 
+pub type SignedPayload = generic::SignedPayload; + +#[cfg(feature = "runtime-benchmarks")] +mod benches { + frame_benchmarking::define_benchmarks!( + // Polkadot + // NOTE: Make sure to prefix these with `runtime_common::` so + // the that path resolves correctly in the generated file. + [polkadot_runtime_common::assigned_slots, AssignedSlots] + [polkadot_runtime_common::auctions, Auctions] + [polkadot_runtime_common::crowdloan, Crowdloan] + [polkadot_runtime_common::identity_migrator, IdentityMigrator] + [polkadot_runtime_common::paras_registrar, Registrar] + [polkadot_runtime_common::slots, Slots] + [polkadot_runtime_parachains::configuration, Configuration] + [polkadot_runtime_parachains::disputes, ParasDisputes] + [polkadot_runtime_parachains::disputes::slashing, ParasSlashing] + [polkadot_runtime_parachains::hrmp, Hrmp] + [polkadot_runtime_parachains::inclusion, ParaInclusion] + [polkadot_runtime_parachains::initializer, Initializer] + [polkadot_runtime_parachains::paras, Paras] + [polkadot_runtime_parachains::paras_inherent, ParaInherent] + [polkadot_runtime_parachains::on_demand, OnDemandAssignmentProvider] + [polkadot_runtime_parachains::coretime, Coretime] + // Substrate + [pallet_bags_list, VoterList] + [pallet_balances, Balances] + [pallet_beefy_mmr, BeefyMmrLeaf] + [pallet_conviction_voting, ConvictionVoting] + [frame_election_provider_support, ElectionProviderBench::] + [pallet_identity, Identity] + [pallet_indices, Indices] + [pallet_message_queue, MessageQueue] + [pallet_migrations, MultiBlockMigrations] + [pallet_mmr, Mmr] + [pallet_multisig, Multisig] + [pallet_offences, OffencesBench::] + [pallet_parameters, Parameters] + [pallet_preimage, Preimage] + [pallet_proxy, Proxy] + [pallet_recovery, Recovery] + [pallet_referenda, Referenda] + [pallet_scheduler, Scheduler] + [pallet_session, SessionBench::] + [pallet_sudo, Sudo] + [frame_system, SystemBench::] + [frame_system_extensions, SystemExtensionsBench::] + [pallet_timestamp, Timestamp] + 
[pallet_transaction_payment, TransactionPayment] + [pallet_treasury, Treasury] + [pallet_utility, Utility] + [pallet_vesting, Vesting] + [pallet_whitelist, Whitelist] + [pallet_asset_rate, AssetRate] + // XCM + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] + // NOTE: Make sure you point to the individual modules below. + [pallet_xcm_benchmarks::fungible, XcmBalances] + [pallet_xcm_benchmarks::generic, XcmGeneric] + ); +} + +sp_api::impl_runtime_apis! { + impl sp_api::Core for Runtime { + fn version() -> RuntimeVersion { + VERSION + } + + fn execute_block(block: Block) { + Executive::execute_block(block); + } + + fn initialize_block(header: &::Header) -> sp_runtime::ExtrinsicInclusionMode { + Executive::initialize_block(header) + } + } + + impl sp_api::Metadata for Runtime { + fn metadata() -> OpaqueMetadata { + OpaqueMetadata::new(Runtime::metadata().into()) + } + + fn metadata_at_version(version: u32) -> Option { + Runtime::metadata_at_version(version) + } + + fn metadata_versions() -> alloc::vec::Vec { + Runtime::metadata_versions() + } + } + + impl frame_support::view_functions::runtime_api::RuntimeViewFunction for Runtime { + fn execute_view_function(id: frame_support::view_functions::ViewFunctionId, input: Vec) -> Result, frame_support::view_functions::ViewFunctionDispatchError> { + Runtime::execute_view_function(id, input) + } + } + + impl sp_block_builder::BlockBuilder for Runtime { + fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { + Executive::apply_extrinsic(extrinsic) + } + + fn finalize_block() -> ::Header { + Executive::finalize_block() + } + + fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<::Extrinsic> { + data.create_extrinsics() + } + + fn check_inherents( + block: Block, + data: sp_inherents::InherentData, + ) -> sp_inherents::CheckInherentsResult { + data.check_extrinsics(&block) + } + } + + impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { + fn validate_transaction( + source: 
TransactionSource, + tx: ::Extrinsic, + block_hash: ::Hash, + ) -> TransactionValidity { + Executive::validate_transaction(source, tx, block_hash) + } + } + + impl sp_offchain::OffchainWorkerApi for Runtime { + fn offchain_worker(header: &::Header) { + Executive::offchain_worker(header) + } + } + + #[api_version(13)] + impl polkadot_primitives::runtime_api::ParachainHost for Runtime { + fn validators() -> Vec { + parachains_runtime_api_impl::validators::() + } + + fn validation_code_bomb_limit() -> u32 { + parachains_staging_runtime_api_impl::validation_code_bomb_limit::() + } + + fn validator_groups() -> (Vec>, GroupRotationInfo) { + parachains_runtime_api_impl::validator_groups::() + } + + fn availability_cores() -> Vec> { + parachains_runtime_api_impl::availability_cores::() + } + + fn persisted_validation_data(para_id: ParaId, assumption: OccupiedCoreAssumption) + -> Option> { + parachains_runtime_api_impl::persisted_validation_data::(para_id, assumption) + } + + fn assumed_validation_data( + para_id: ParaId, + expected_persisted_validation_data_hash: Hash, + ) -> Option<(PersistedValidationData, ValidationCodeHash)> { + parachains_runtime_api_impl::assumed_validation_data::( + para_id, + expected_persisted_validation_data_hash, + ) + } + + fn check_validation_outputs( + para_id: ParaId, + outputs: polkadot_primitives::CandidateCommitments, + ) -> bool { + parachains_runtime_api_impl::check_validation_outputs::(para_id, outputs) + } + + fn session_index_for_child() -> SessionIndex { + parachains_runtime_api_impl::session_index_for_child::() + } + + fn validation_code(para_id: ParaId, assumption: OccupiedCoreAssumption) + -> Option { + parachains_runtime_api_impl::validation_code::(para_id, assumption) + } + + fn candidate_pending_availability(para_id: ParaId) -> Option> { + #[allow(deprecated)] + parachains_runtime_api_impl::candidate_pending_availability::(para_id) + } + + fn candidate_events() -> Vec> { + parachains_runtime_api_impl::candidate_events::(|ev| { 
+ match ev { + RuntimeEvent::ParaInclusion(ev) => { + Some(ev) + } + _ => None, + } + }) + } + + fn session_info(index: SessionIndex) -> Option { + parachains_runtime_api_impl::session_info::(index) + } + + fn session_executor_params(session_index: SessionIndex) -> Option { + parachains_runtime_api_impl::session_executor_params::(session_index) + } + + fn dmq_contents(recipient: ParaId) -> Vec> { + parachains_runtime_api_impl::dmq_contents::(recipient) + } + + fn inbound_hrmp_channels_contents( + recipient: ParaId + ) -> BTreeMap>> { + parachains_runtime_api_impl::inbound_hrmp_channels_contents::(recipient) + } + + fn validation_code_by_hash(hash: ValidationCodeHash) -> Option { + parachains_runtime_api_impl::validation_code_by_hash::(hash) + } + + fn on_chain_votes() -> Option> { + parachains_runtime_api_impl::on_chain_votes::() + } + + fn submit_pvf_check_statement( + stmt: PvfCheckStatement, + signature: ValidatorSignature, + ) { + parachains_runtime_api_impl::submit_pvf_check_statement::(stmt, signature) + } + + fn pvfs_require_precheck() -> Vec { + parachains_runtime_api_impl::pvfs_require_precheck::() + } + + fn validation_code_hash(para_id: ParaId, assumption: OccupiedCoreAssumption) + -> Option + { + parachains_runtime_api_impl::validation_code_hash::(para_id, assumption) + } + + fn disputes() -> Vec<(SessionIndex, CandidateHash, DisputeState)> { + parachains_runtime_api_impl::get_session_disputes::() + } + + fn unapplied_slashes( + ) -> Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)> { + parachains_runtime_api_impl::unapplied_slashes::() + } + + fn key_ownership_proof( + validator_id: ValidatorId, + ) -> Option { + use codec::Encode; + + Historical::prove((PARACHAIN_KEY_TYPE_ID, validator_id)) + .map(|p| p.encode()) + .map(slashing::OpaqueKeyOwnershipProof::new) + } + + fn submit_report_dispute_lost( + dispute_proof: slashing::DisputeProof, + key_ownership_proof: slashing::OpaqueKeyOwnershipProof, + ) -> Option<()> { + 
parachains_runtime_api_impl::submit_unsigned_slashing_report::( + dispute_proof, + key_ownership_proof, + ) + } + + fn minimum_backing_votes() -> u32 { + parachains_runtime_api_impl::minimum_backing_votes::() + } + + fn para_backing_state(para_id: ParaId) -> Option { + #[allow(deprecated)] + parachains_runtime_api_impl::backing_state::(para_id) + } + + fn async_backing_params() -> polkadot_primitives::AsyncBackingParams { + #[allow(deprecated)] + parachains_runtime_api_impl::async_backing_params::() + } + + fn approval_voting_params() -> ApprovalVotingParams { + parachains_runtime_api_impl::approval_voting_params::() + } + + fn disabled_validators() -> Vec { + parachains_runtime_api_impl::disabled_validators::() + } + + fn node_features() -> NodeFeatures { + parachains_runtime_api_impl::node_features::() + } + + fn claim_queue() -> BTreeMap> { + parachains_runtime_api_impl::claim_queue::() + } + + fn candidates_pending_availability(para_id: ParaId) -> Vec> { + parachains_runtime_api_impl::candidates_pending_availability::(para_id) + } + + fn backing_constraints(para_id: ParaId) -> Option { + parachains_staging_runtime_api_impl::backing_constraints::(para_id) + } + + fn scheduling_lookahead() -> u32 { + parachains_staging_runtime_api_impl::scheduling_lookahead::() + } + } + + #[api_version(5)] + impl sp_consensus_beefy::BeefyApi for Runtime { + fn beefy_genesis() -> Option { + pallet_beefy::GenesisBlock::::get() + } + + fn validator_set() -> Option> { + Beefy::validator_set() + } + + fn submit_report_double_voting_unsigned_extrinsic( + equivocation_proof: sp_consensus_beefy::DoubleVotingProof< + BlockNumber, + BeefyId, + BeefySignature, + >, + key_owner_proof: sp_consensus_beefy::OpaqueKeyOwnershipProof, + ) -> Option<()> { + let key_owner_proof = key_owner_proof.decode()?; + + Beefy::submit_unsigned_double_voting_report( + equivocation_proof, + key_owner_proof, + ) + } + + fn submit_report_fork_voting_unsigned_extrinsic( + equivocation_proof: + 
sp_consensus_beefy::ForkVotingProof< + ::Header, + BeefyId, + sp_runtime::OpaqueValue + >, + key_owner_proof: sp_consensus_beefy::OpaqueKeyOwnershipProof, + ) -> Option<()> { + Beefy::submit_unsigned_fork_voting_report( + equivocation_proof.try_into()?, + key_owner_proof.decode()?, + ) + } + + fn submit_report_future_block_voting_unsigned_extrinsic( + equivocation_proof: sp_consensus_beefy::FutureBlockVotingProof, + key_owner_proof: sp_consensus_beefy::OpaqueKeyOwnershipProof, + ) -> Option<()> { + Beefy::submit_unsigned_future_block_voting_report( + equivocation_proof, + key_owner_proof.decode()?, + ) + } + + fn generate_key_ownership_proof( + _set_id: sp_consensus_beefy::ValidatorSetId, + authority_id: BeefyId, + ) -> Option { + use codec::Encode; + + Historical::prove((sp_consensus_beefy::KEY_TYPE, authority_id)) + .map(|p| p.encode()) + .map(sp_consensus_beefy::OpaqueKeyOwnershipProof::new) + } + + fn generate_ancestry_proof( + prev_block_number: BlockNumber, + best_known_block_number: Option, + ) -> Option { + use sp_consensus_beefy::AncestryHelper; + + BeefyMmrLeaf::generate_proof(prev_block_number, best_known_block_number) + .map(|p| p.encode()) + .map(sp_runtime::OpaqueValue::new) + } + } + + impl mmr::MmrApi for Runtime { + fn mmr_root() -> Result { + Ok(pallet_mmr::RootHash::::get()) + } + + fn mmr_leaf_count() -> Result { + Ok(pallet_mmr::NumberOfLeaves::::get()) + } + + fn generate_proof( + block_numbers: Vec, + best_known_block_number: Option, + ) -> Result<(Vec, mmr::LeafProof), mmr::Error> { + Mmr::generate_proof(block_numbers, best_known_block_number).map( + |(leaves, proof)| { + ( + leaves + .into_iter() + .map(|leaf| mmr::EncodableOpaqueLeaf::from_leaf(&leaf)) + .collect(), + proof, + ) + }, + ) + } + + fn verify_proof(leaves: Vec, proof: mmr::LeafProof) + -> Result<(), mmr::Error> + { + let leaves = leaves.into_iter().map(|leaf| + leaf.into_opaque_leaf() + .try_decode() + .ok_or(mmr::Error::Verify)).collect::, mmr::Error>>()?; + 
Mmr::verify_leaves(leaves, proof) + } + + fn verify_proof_stateless( + root: mmr::Hash, + leaves: Vec, + proof: mmr::LeafProof + ) -> Result<(), mmr::Error> { + let nodes = leaves.into_iter().map(|leaf|mmr::DataOrHash::Data(leaf.into_opaque_leaf())).collect(); + pallet_mmr::verify_leaves_proof::(root, nodes, proof) + } + } + + impl pallet_beefy_mmr::BeefyMmrApi for RuntimeApi { + fn authority_set_proof() -> sp_consensus_beefy::mmr::BeefyAuthoritySet { + BeefyMmrLeaf::authority_set_proof() + } + + fn next_authority_set_proof() -> sp_consensus_beefy::mmr::BeefyNextAuthoritySet { + BeefyMmrLeaf::next_authority_set_proof() + } + } + + impl fg_primitives::GrandpaApi for Runtime { + fn grandpa_authorities() -> Vec<(GrandpaId, u64)> { + Grandpa::grandpa_authorities() + } + + fn current_set_id() -> fg_primitives::SetId { + pallet_grandpa::CurrentSetId::::get() + } + + fn submit_report_equivocation_unsigned_extrinsic( + equivocation_proof: fg_primitives::EquivocationProof< + ::Hash, + sp_runtime::traits::NumberFor, + >, + key_owner_proof: fg_primitives::OpaqueKeyOwnershipProof, + ) -> Option<()> { + let key_owner_proof = key_owner_proof.decode()?; + + Grandpa::submit_unsigned_equivocation_report( + equivocation_proof, + key_owner_proof, + ) + } + + fn generate_key_ownership_proof( + _set_id: fg_primitives::SetId, + authority_id: fg_primitives::AuthorityId, + ) -> Option { + use codec::Encode; + + Historical::prove((fg_primitives::KEY_TYPE, authority_id)) + .map(|p| p.encode()) + .map(fg_primitives::OpaqueKeyOwnershipProof::new) + } + } + + impl sp_consensus_babe::BabeApi for Runtime { + fn configuration() -> sp_consensus_babe::BabeConfiguration { + let epoch_config = Babe::epoch_config().unwrap_or(BABE_GENESIS_EPOCH_CONFIG); + sp_consensus_babe::BabeConfiguration { + slot_duration: Babe::slot_duration(), + epoch_length: EpochDuration::get(), + c: epoch_config.c, + authorities: Babe::authorities().to_vec(), + randomness: Babe::randomness(), + allowed_slots: 
epoch_config.allowed_slots, + } + } + + fn current_epoch_start() -> sp_consensus_babe::Slot { + Babe::current_epoch_start() + } + + fn current_epoch() -> sp_consensus_babe::Epoch { + Babe::current_epoch() + } + + fn next_epoch() -> sp_consensus_babe::Epoch { + Babe::next_epoch() + } + + fn generate_key_ownership_proof( + _slot: sp_consensus_babe::Slot, + authority_id: sp_consensus_babe::AuthorityId, + ) -> Option { + use codec::Encode; + + Historical::prove((sp_consensus_babe::KEY_TYPE, authority_id)) + .map(|p| p.encode()) + .map(sp_consensus_babe::OpaqueKeyOwnershipProof::new) + } + + fn submit_report_equivocation_unsigned_extrinsic( + equivocation_proof: sp_consensus_babe::EquivocationProof<::Header>, + key_owner_proof: sp_consensus_babe::OpaqueKeyOwnershipProof, + ) -> Option<()> { + let key_owner_proof = key_owner_proof.decode()?; + + Babe::submit_unsigned_equivocation_report( + equivocation_proof, + key_owner_proof, + ) + } + } + + impl sp_authority_discovery::AuthorityDiscoveryApi for Runtime { + fn authorities() -> Vec { + parachains_runtime_api_impl::relevant_authority_ids::() + } + } + + impl sp_session::SessionKeys for Runtime { + fn generate_session_keys(seed: Option>) -> Vec { + SessionKeys::generate(seed) + } + + fn decode_session_keys( + encoded: Vec, + ) -> Option, sp_core::crypto::KeyTypeId)>> { + SessionKeys::decode_into_raw_public_keys(&encoded) + } + } + + impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { + fn account_nonce(account: AccountId) -> Nonce { + System::account_nonce(account) + } + } + + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi< + Block, + Balance, + > for Runtime { + fn query_info(uxt: ::Extrinsic, len: u32) -> RuntimeDispatchInfo { + TransactionPayment::query_info(uxt, len) + } + fn query_fee_details(uxt: ::Extrinsic, len: u32) -> FeeDetails { + TransactionPayment::query_fee_details(uxt, len) + } + fn query_weight_to_fee(weight: Weight) -> Balance { + 
TransactionPayment::weight_to_fee(weight) + } + fn query_length_to_fee(length: u32) -> Balance { + TransactionPayment::length_to_fee(length) + } + } + + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentCallApi + for Runtime + { + fn query_call_info(call: RuntimeCall, len: u32) -> RuntimeDispatchInfo { + TransactionPayment::query_call_info(call, len) + } + fn query_call_fee_details(call: RuntimeCall, len: u32) -> FeeDetails { + TransactionPayment::query_call_fee_details(call, len) + } + fn query_weight_to_fee(weight: Weight) -> Balance { + TransactionPayment::weight_to_fee(weight) + } + fn query_length_to_fee(length: u32) -> Balance { + TransactionPayment::length_to_fee(length) + } + } + + impl xcm_runtime_apis::fees::XcmPaymentApi for Runtime { + fn query_acceptable_payment_assets(xcm_version: xcm::Version) -> Result, XcmPaymentApiError> { + let acceptable_assets = vec![AssetId(xcm_config::TokenLocation::get())]; + XcmPallet::query_acceptable_payment_assets(xcm_version, acceptable_assets) + } + + fn query_weight_to_asset_fee(weight: Weight, asset: VersionedAssetId) -> Result { + let latest_asset_id: Result = asset.clone().try_into(); + match latest_asset_id { + Ok(asset_id) if asset_id.0 == xcm_config::TokenLocation::get() => { + // for native token + Ok(WeightToFee::weight_to_fee(&weight)) + }, + Ok(asset_id) => { + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - unhandled asset_id: {asset_id:?}!"); + Err(XcmPaymentApiError::AssetNotFound) + }, + Err(_) => { + log::trace!(target: "xcm::xcm_runtime_apis", "query_weight_to_asset_fee - failed to convert asset: {asset:?}!"); + Err(XcmPaymentApiError::VersionedConversionFailed) + } + } + } + + fn query_xcm_weight(message: VersionedXcm<()>) -> Result { + XcmPallet::query_xcm_weight(message) + } + + fn query_delivery_fees(destination: VersionedLocation, message: VersionedXcm<()>) -> Result { + XcmPallet::query_delivery_fees(destination, message) + } + } + + impl 
xcm_runtime_apis::dry_run::DryRunApi for Runtime { + fn dry_run_call(origin: OriginCaller, call: RuntimeCall, result_xcms_version: xcm::prelude::XcmVersion) -> Result, XcmDryRunApiError> { + XcmPallet::dry_run_call::(origin, call, result_xcms_version) + } + + fn dry_run_xcm(origin_location: VersionedLocation, xcm: VersionedXcm) -> Result, XcmDryRunApiError> { + XcmPallet::dry_run_xcm::(origin_location, xcm) + } + } + + impl xcm_runtime_apis::conversions::LocationToAccountApi for Runtime { + fn convert_location(location: VersionedLocation) -> Result< + AccountId, + xcm_runtime_apis::conversions::Error + > { + xcm_runtime_apis::conversions::LocationToAccountHelper::< + AccountId, + xcm_config::LocationConverter, + >::convert_location(location) + } + } + + #[cfg(feature = "try-runtime")] + impl frame_try_runtime::TryRuntime for Runtime { + fn on_runtime_upgrade(checks: frame_try_runtime::UpgradeCheckSelect) -> (Weight, Weight) { + log::info!("try-runtime::on_runtime_upgrade westend."); + let weight = Executive::try_runtime_upgrade(checks).unwrap(); + (weight, BlockWeights::get().max_block) + } + + fn execute_block( + block: Block, + state_root_check: bool, + signature_check: bool, + select: frame_try_runtime::TryStateSelect, + ) -> Weight { + // NOTE: intentional unwrap: we don't want to propagate the error backwards, and want to + // have a backtrace here. 
+ Executive::try_execute_block(block, state_root_check, signature_check, select).unwrap() + } + } + + #[cfg(feature = "runtime-benchmarks")] + impl frame_benchmarking::Benchmark for Runtime { + fn benchmark_metadata(extra: bool) -> ( + Vec, + Vec, + ) { + use frame_benchmarking::BenchmarkList; + use frame_support::traits::StorageInfoTrait; + + use pallet_session_benchmarking::Pallet as SessionBench; + use pallet_offences_benchmarking::Pallet as OffencesBench; + use pallet_election_provider_support_benchmarking::Pallet as ElectionProviderBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; + use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; + + type XcmBalances = pallet_xcm_benchmarks::fungible::Pallet::; + type XcmGeneric = pallet_xcm_benchmarks::generic::Pallet::; + + let mut list = Vec::::new(); + list_benchmarks!(list, extra); + + let storage_info = AllPalletsWithSystem::storage_info(); + return (list, storage_info) + } + + #[allow(non_local_definitions)] + fn dispatch_benchmark( + config: frame_benchmarking::BenchmarkConfig, + ) -> Result< + Vec, + alloc::string::String, + > { + use frame_support::traits::WhitelistedStorageKeys; + use frame_benchmarking::{BenchmarkBatch, BenchmarkError}; + use sp_storage::TrackedStorageKey; + // Trying to add benchmarks directly to some pallets caused cyclic dependency issues. + // To get around that, we separated the benchmarks into its own crate. 
+ use pallet_session_benchmarking::Pallet as SessionBench; + use pallet_offences_benchmarking::Pallet as OffencesBench; + use pallet_election_provider_support_benchmarking::Pallet as ElectionProviderBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; + use frame_system_benchmarking::Pallet as SystemBench; + use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; + + impl pallet_session_benchmarking::Config for Runtime {} + impl pallet_offences_benchmarking::Config for Runtime {} + impl pallet_election_provider_support_benchmarking::Config for Runtime {} + + use xcm_config::{AssetHub, TokenLocation}; + + use alloc::boxed::Box; + + parameter_types! { + pub ExistentialDepositAsset: Option = Some(( + TokenLocation::get(), + ExistentialDeposit::get() + ).into()); + pub AssetHubParaId: ParaId = pallet_staking_async_rc_runtime_constants::system_parachain::ASSET_HUB_ID.into(); + pub const RandomParaId: ParaId = ParaId::new(43211234); + } + + impl pallet_xcm::benchmarking::Config for Runtime { + type DeliveryHelper = ( + polkadot_runtime_common::xcm_sender::ToParachainDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + xcm_config::PriceForChildParachainDelivery, + AssetHubParaId, + Dmp, + >, + polkadot_runtime_common::xcm_sender::ToParachainDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + xcm_config::PriceForChildParachainDelivery, + RandomParaId, + Dmp, + > + ); + + fn reachable_dest() -> Option { + Some(crate::xcm_config::AssetHub::get()) + } + + fn teleportable_asset_and_dest() -> Option<(Asset, Location)> { + // Relay/native token can be teleported to/from AH. + Some(( + Asset { fun: Fungible(ExistentialDeposit::get()), id: AssetId(Here.into()) }, + crate::xcm_config::AssetHub::get(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(Asset, Location)> { + // Relay can reserve transfer native token to some random parachain. 
+ Some(( + Asset { + fun: Fungible(ExistentialDeposit::get()), + id: AssetId(Here.into()) + }, + crate::Junction::Parachain(RandomParaId::get().into()).into(), + )) + } + + fn set_up_complex_asset_transfer( + ) -> Option<(Assets, u32, Location, Box)> { + // Relay supports only native token, either reserve transfer it to non-system parachains, + // or teleport it to system parachain. Use the teleport case for benchmarking as it's + // slightly heavier. + + // Relay/native token can be teleported to/from AH. + let native_location = Here.into(); + let dest = crate::xcm_config::AssetHub::get(); + pallet_xcm::benchmarking::helpers::native_teleport_as_asset_transfer::( + native_location, + dest + ) + } + + fn get_asset() -> Asset { + Asset { + id: AssetId(Location::here()), + fun: Fungible(ExistentialDeposit::get()), + } + } + } + impl frame_system_benchmarking::Config for Runtime {} + impl polkadot_runtime_parachains::disputes::slashing::benchmarking::Config for Runtime {} + + use xcm::latest::{ + AssetId, Fungibility::*, InteriorLocation, Junction, Junctions::*, + Asset, Assets, Location, NetworkId, Response, + }; + + impl pallet_xcm_benchmarks::Config for Runtime { + type XcmConfig = xcm_config::XcmConfig; + type AccountIdConverter = xcm_config::LocationConverter; + type DeliveryHelper = polkadot_runtime_common::xcm_sender::ToParachainDeliveryHelper< + xcm_config::XcmConfig, + ExistentialDepositAsset, + xcm_config::PriceForChildParachainDelivery, + AssetHubParaId, + Dmp, + >; + fn valid_destination() -> Result { + Ok(AssetHub::get()) + } + fn worst_case_holding(_depositable_count: u32) -> Assets { + // Westend only knows about WND. + vec![Asset{ + id: AssetId(TokenLocation::get()), + fun: Fungible(1_000_000 * UNITS), + }].into() + } + } + + parameter_types! 
{ + pub TrustedTeleporter: Option<(Location, Asset)> = Some(( + AssetHub::get(), + Asset { fun: Fungible(1 * UNITS), id: AssetId(TokenLocation::get()) }, + )); + pub const TrustedReserve: Option<(Location, Asset)> = None; + } + + impl pallet_xcm_benchmarks::fungible::Config for Runtime { + type TransactAsset = Balances; + + type CheckedAccount = xcm_config::LocalCheckAccount; + type TrustedTeleporter = TrustedTeleporter; + type TrustedReserve = TrustedReserve; + + fn get_asset() -> Asset { + Asset { + id: AssetId(TokenLocation::get()), + fun: Fungible(1 * UNITS), + } + } + } + + impl pallet_xcm_benchmarks::generic::Config for Runtime { + type TransactAsset = Balances; + type RuntimeCall = RuntimeCall; + + fn worst_case_response() -> (u64, Response) { + (0u64, Response::Version(Default::default())) + } + + fn worst_case_asset_exchange() -> Result<(Assets, Assets), BenchmarkError> { + // Westend doesn't support asset exchanges + Err(BenchmarkError::Skip) + } + + fn universal_alias() -> Result<(Location, Junction), BenchmarkError> { + // The XCM executor of Westend doesn't have a configured `UniversalAliases` + Err(BenchmarkError::Skip) + } + + fn transact_origin_and_runtime_call() -> Result<(Location, RuntimeCall), BenchmarkError> { + Ok((AssetHub::get(), frame_system::Call::remark_with_event { remark: vec![] }.into())) + } + + fn subscribe_origin() -> Result { + Ok(AssetHub::get()) + } + + fn claimable_asset() -> Result<(Location, Location, Assets), BenchmarkError> { + let origin = AssetHub::get(); + let assets: Assets = (AssetId(TokenLocation::get()), 1_000 * UNITS).into(); + let ticket = Location { parents: 0, interior: Here }; + Ok((origin, ticket, assets)) + } + + fn fee_asset() -> Result { + Ok(Asset { + id: AssetId(TokenLocation::get()), + fun: Fungible(1_000_000 * UNITS), + }) + } + + fn unlockable_asset() -> Result<(Location, Location, Asset), BenchmarkError> { + // Westend doesn't support asset locking + Err(BenchmarkError::Skip) + } + + fn 
export_message_origin_and_destination( + ) -> Result<(Location, NetworkId, InteriorLocation), BenchmarkError> { + // Westend doesn't support exporting messages + Err(BenchmarkError::Skip) + } + + fn alias_origin() -> Result<(Location, Location), BenchmarkError> { + let origin = Location::new(0, [Parachain(1000)]); + let target = Location::new(0, [Parachain(1000), AccountId32 { id: [128u8; 32], network: None }]); + Ok((origin, target)) + } + } + + type XcmBalances = pallet_xcm_benchmarks::fungible::Pallet::; + type XcmGeneric = pallet_xcm_benchmarks::generic::Pallet::; + + let whitelist: Vec = AllPalletsWithSystem::whitelisted_storage_keys(); + + let mut batches = Vec::::new(); + let params = (&config, &whitelist); + + add_benchmarks!(params, batches); + + Ok(batches) + } + } + + impl sp_genesis_builder::GenesisBuilder for Runtime { + fn build_state(config: Vec) -> sp_genesis_builder::Result { + build_state::(config) + } + + fn get_preset(id: &Option) -> Option> { + get_preset::(id, &genesis_config_presets::get_preset) + } + + fn preset_names() -> Vec { + genesis_config_presets::preset_names() + } + } + + impl xcm_runtime_apis::trusted_query::TrustedQueryApi for Runtime { + fn is_trusted_reserve(asset: VersionedAsset, location: VersionedLocation) -> Result { + XcmPallet::is_trusted_reserve(asset, location) + } + fn is_trusted_teleporter(asset: VersionedAsset, location: VersionedLocation) -> Result { + XcmPallet::is_trusted_teleporter(asset, location) + } + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/tests.rs b/substrate/frame/staking-async/runtimes/rc/src/tests.rs new file mode 100644 index 0000000000000..657370a9b6f95 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/tests.rs @@ -0,0 +1,215 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. 
+ +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Tests for the Westend Runtime Configuration + +use std::collections::HashSet; + +use crate::{xcm_config::LocationConverter, *}; +use frame_support::traits::WhitelistedStorageKeys; +use sp_core::{crypto::Ss58Codec, hexdisplay::HexDisplay}; +use sp_keyring::Sr25519Keyring::Alice; +use xcm_runtime_apis::conversions::LocationToAccountHelper; + +#[test] +fn remove_keys_weight_is_sensible() { + use polkadot_runtime_common::crowdloan::WeightInfo; + let max_weight = ::WeightInfo::refund(RemoveKeysLimit::get()); + // Max remove keys limit should be no more than half the total block weight. + assert!((max_weight * 2).all_lt(BlockWeights::get().max_block)); +} + +#[test] +fn sample_size_is_sensible() { + use polkadot_runtime_common::auctions::WeightInfo; + // Need to clean up all samples at the end of an auction. + let samples: BlockNumber = EndingPeriod::get() / SampleLength::get(); + let max_weight: frame_support::weights::Weight = + RocksDbWeight::get().reads_writes(samples.into(), samples.into()); + // Max sample cleanup should be no more than half the total block weight. 
+ assert!((max_weight * 2).all_lt(BlockWeights::get().max_block)); + assert!((::WeightInfo::on_initialize() * 2) + .all_lt(BlockWeights::get().max_block)); +} + +#[test] +fn call_size() { + RuntimeCall::assert_size_under(256); +} + +#[test] +fn sanity_check_teleport_assets_weight() { + // This test sanity checks that at least 50 teleports can exist in a block. + // Usually when XCM runs into an issue, it will return a weight of `Weight::MAX`, + // so this test will certainly ensure that this problem does not occur. + use frame_support::dispatch::GetDispatchInfo; + let weight = pallet_xcm::Call::::limited_teleport_assets { + dest: Box::new(Here.into()), + beneficiary: Box::new(Here.into()), + assets: Box::new((Here, 200_000).into()), + fee_asset_item: 0, + weight_limit: Unlimited, + } + .get_dispatch_info() + .call_weight; + + assert!((weight * 50).all_lt(BlockWeights::get().max_block)); +} + +#[test] +fn check_whitelist() { + let whitelist: HashSet = AllPalletsWithSystem::whitelisted_storage_keys() + .iter() + .map(|e| HexDisplay::from(&e.key).to_string()) + .collect(); + + // Block number + assert!(whitelist.contains("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac")); + // Total issuance + assert!(whitelist.contains("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80")); + // Execution phase + assert!(whitelist.contains("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a")); + // Event count + assert!(whitelist.contains("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850")); + // System events + assert!(whitelist.contains("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7")); + // Configuration ActiveConfig + assert!(whitelist.contains("06de3d8a54d27e44a9d5ce189618f22db4b49d95320d9021994c850f25b8e385")); + // XcmPallet VersionDiscoveryQueue + assert!(whitelist.contains("1405f2411d0af5a7ff397e7c9dc68d194a222ba0333561192e474c59ed8e30e1")); + // XcmPallet SafeXcmVersion + 
assert!(whitelist.contains("1405f2411d0af5a7ff397e7c9dc68d196323ae84c43568be0d1394d5d0d522c4")); +} + +#[test] +fn check_treasury_pallet_id() { + assert_eq!( + ::index() as u8, + pallet_staking_async_rc_runtime_constants::TREASURY_PALLET_ID + ); +} + +#[cfg(all(test, feature = "try-runtime"))] +mod remote_tests { + use super::*; + use frame_try_runtime::{runtime_decl_for_try_runtime::TryRuntime, UpgradeCheckSelect}; + use remote_externalities::{ + Builder, Mode, OfflineConfig, OnlineConfig, SnapshotConfig, Transport, + }; + use std::env::var; + + #[tokio::test] + async fn run_migrations() { + if var("RUN_MIGRATION_TESTS").is_err() { + return; + } + + sp_tracing::try_init_simple(); + let transport: Transport = + var("WS").unwrap_or("wss://westend-rpc.polkadot.io:443".to_string()).into(); + let maybe_state_snapshot: Option = var("SNAP").map(|s| s.into()).ok(); + let mut ext = Builder::::default() + .mode(if let Some(state_snapshot) = maybe_state_snapshot { + Mode::OfflineOrElseOnline( + OfflineConfig { state_snapshot: state_snapshot.clone() }, + OnlineConfig { + transport, + state_snapshot: Some(state_snapshot), + ..Default::default() + }, + ) + } else { + Mode::Online(OnlineConfig { transport, ..Default::default() }) + }) + .build() + .await + .unwrap(); + ext.execute_with(|| Runtime::on_runtime_upgrade(UpgradeCheckSelect::PreAndPost)); + } +} + +#[test] +fn location_conversion_works() { + // the purpose of hardcoded values is to catch an unintended location conversion logic change. 
+ struct TestCase { + description: &'static str, + location: Location, + expected_account_id_str: &'static str, + } + + let test_cases = vec![ + // DescribeTerminus + TestCase { + description: "DescribeTerminus Child", + location: Location::new(0, [Parachain(1111)]), + expected_account_id_str: "5Ec4AhP4h37t7TFsAZ4HhFq6k92usAAJDUC3ADSZ4H4Acru3", + }, + // DescribePalletTerminal + TestCase { + description: "DescribePalletTerminal Child", + location: Location::new(0, [Parachain(1111), PalletInstance(50)]), + expected_account_id_str: "5FjEBrKn3STAFsZpQF4jzwxUYHNGnNgzdZqSQfTzeJ82XKp6", + }, + // DescribeAccountId32Terminal + TestCase { + description: "DescribeAccountId32Terminal Child", + location: Location::new( + 0, + [Parachain(1111), AccountId32 { network: None, id: AccountId::from(Alice).into() }], + ), + expected_account_id_str: "5EEMro9RRDpne4jn9TuD7cTB6Amv1raVZ3xspSkqb2BF3FJH", + }, + // DescribeAccountKey20Terminal + TestCase { + description: "DescribeAccountKey20Terminal Child", + location: Location::new( + 0, + [Parachain(1111), AccountKey20 { network: None, key: [0u8; 20] }], + ), + expected_account_id_str: "5HohjXdjs6afcYcgHHSstkrtGfxgfGKsnZ1jtewBpFiGu4DL", + }, + // DescribeTreasuryVoiceTerminal + TestCase { + description: "DescribeTreasuryVoiceTerminal Child", + location: Location::new( + 0, + [Parachain(1111), Plurality { id: BodyId::Treasury, part: BodyPart::Voice }], + ), + expected_account_id_str: "5GenE4vJgHvwYVcD6b4nBvH5HNY4pzpVHWoqwFpNMFT7a2oX", + }, + // DescribeBodyTerminal + TestCase { + description: "DescribeBodyTerminal Child", + location: Location::new( + 0, + [Parachain(1111), Plurality { id: BodyId::Unit, part: BodyPart::Voice }], + ), + expected_account_id_str: "5DPgGBFTTYm1dGbtB1VWHJ3T3ScvdrskGGx6vSJZNP1WNStV", + }, + ]; + + for tc in test_cases { + let expected = + AccountId::from_string(tc.expected_account_id_str).expect("Invalid AccountId string"); + + let got = LocationToAccountHelper::::convert_location( + tc.location.into(), + ) + 
.unwrap(); + + assert_eq!(got, expected, "{}", tc.description); + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/frame_system.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/frame_system.rs new file mode 100644 index 0000000000000..514a2eaedac1b --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/frame_system.rs @@ -0,0 +1,172 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `frame_system` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/westend-runtime/westend_runtime.wasm +// --pallet=frame_system +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system`. +pub struct WeightInfo(PhantomData); +impl frame_system::WeightInfo for WeightInfo { + /// The range of component `b` is `[0, 3932160]`. + fn remark(b: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_046_000 picoseconds. + Weight::from_parts(2_092_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 160 + .saturating_add(Weight::from_parts(14_296, 0).saturating_mul(b.into())) + } + /// The range of component `b` is `[0, 3932160]`. + fn remark_with_event(b: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 6_343_000 picoseconds. + Weight::from_parts(6_529_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 158 + .saturating_add(Weight::from_parts(15_724, 0).saturating_mul(b.into())) + } + /// Storage: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) + fn set_heap_pages() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_611_000 picoseconds. 
+ Weight::from_parts(3_856_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) + fn set_code() -> Weight { + // Proof Size summary in bytes: + // Measured: `4` + // Estimated: `67035` + // Minimum execution time: 186_708_046_000 picoseconds. + Weight::from_parts(188_430_007_000, 0) + .saturating_add(Weight::from_parts(0, 67035)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `i` is `[0, 1000]`. + fn set_storage(i: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_067_000 picoseconds. + Weight::from_parts(2_183_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 2_517 + .saturating_add(Weight::from_parts(739_980, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) + } + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `i` is `[0, 1000]`. + fn kill_storage(i: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_996_000 picoseconds. 
+ Weight::from_parts(2_130_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 1_375 + .saturating_add(Weight::from_parts(575_422, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) + } + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `p` is `[0, 1000]`. + fn kill_prefix(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `69 + p * (69 ±0)` + // Estimated: `81 + p * (70 ±0)` + // Minimum execution time: 3_947_000 picoseconds. + Weight::from_parts(4_152_000, 0) + .saturating_add(Weight::from_parts(0, 81)) + // Standard Error: 2_193 + .saturating_add(Weight::from_parts(1_392_480, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) + .saturating_add(Weight::from_parts(0, 70).saturating_mul(p.into())) + } + /// Storage: `System::AuthorizedUpgrade` (r:0 w:1) + /// Proof: `System::AuthorizedUpgrade` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + fn authorize_upgrade() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 17_968_000 picoseconds. 
+ Weight::from_parts(19_353_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::AuthorizedUpgrade` (r:1 w:1) + /// Proof: `System::AuthorizedUpgrade` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a636f6465` (r:0 w:1) + fn apply_authorized_upgrade() -> Weight { + // Proof Size summary in bytes: + // Measured: `26` + // Estimated: `67035` + // Minimum execution time: 190_893_853_000 picoseconds. + Weight::from_parts(193_181_367_000, 0) + .saturating_add(Weight::from_parts(0, 67035)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/frame_system_extensions.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/frame_system_extensions.rs new file mode 100644 index 0000000000000..f230c1c92f67b --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/frame_system_extensions.rs @@ -0,0 +1,131 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `frame_system_extensions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/westend-runtime/westend_runtime.wasm +// --pallet=frame_system_extensions +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system_extensions`. +pub struct WeightInfo(PhantomData); +impl frame_system::ExtensionsWeightInfo for WeightInfo { + fn check_genesis() -> Weight { + // Proof Size summary in bytes: + // Measured: `30` + // Estimated: `0` + // Minimum execution time: 3_347_000 picoseconds. + Weight::from_parts(3_488_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_mortality_mortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `68` + // Estimated: `0` + // Minimum execution time: 6_549_000 picoseconds. 
+ Weight::from_parts(6_749_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_mortality_immortal_transaction() -> Weight { + // Proof Size summary in bytes: + // Measured: `68` + // Estimated: `0` + // Minimum execution time: 6_331_000 picoseconds. + Weight::from_parts(6_678_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_non_zero_sender() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 503_000 picoseconds. + Weight::from_parts(594_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn check_nonce() -> Weight { + // Proof Size summary in bytes: + // Measured: `101` + // Estimated: `3593` + // Minimum execution time: 6_858_000 picoseconds. + Weight::from_parts(7_072_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn check_spec_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 405_000 picoseconds. + Weight::from_parts(446_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_tx_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 405_000 picoseconds. + Weight::from_parts(468_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn check_weight() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_806_000 picoseconds. + Weight::from_parts(3_935_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn weight_reclaim() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_214_000 picoseconds. 
+ Weight::from_parts(2_379_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/mod.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/mod.rs new file mode 100644 index 0000000000000..6becb43673349 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/mod.rs @@ -0,0 +1,60 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! A list of the different weight modules for our runtime. 
+ +pub mod frame_system; +pub mod frame_system_extensions; +pub mod pallet_asset_rate; +pub mod pallet_balances; +pub mod pallet_beefy_mmr; +pub mod pallet_conviction_voting; +pub mod pallet_identity; +pub mod pallet_indices; +pub mod pallet_message_queue; +pub mod pallet_migrations; +pub mod pallet_mmr; +pub mod pallet_multisig; +pub mod pallet_parameters; +pub mod pallet_preimage; +pub mod pallet_proxy; +pub mod pallet_referenda_referenda; +pub mod pallet_scheduler; +pub mod pallet_session; +pub mod pallet_sudo; +pub mod pallet_timestamp; +pub mod pallet_transaction_payment; +pub mod pallet_treasury; +pub mod pallet_utility; +pub mod pallet_vesting; +pub mod pallet_whitelist; +pub mod pallet_xcm; +pub mod polkadot_runtime_common_assigned_slots; +pub mod polkadot_runtime_common_auctions; +pub mod polkadot_runtime_common_crowdloan; +pub mod polkadot_runtime_common_identity_migrator; +pub mod polkadot_runtime_common_paras_registrar; +pub mod polkadot_runtime_common_slots; +pub mod polkadot_runtime_parachains_configuration; +pub mod polkadot_runtime_parachains_coretime; +pub mod polkadot_runtime_parachains_disputes; +pub mod polkadot_runtime_parachains_disputes_slashing; +pub mod polkadot_runtime_parachains_hrmp; +pub mod polkadot_runtime_parachains_inclusion; +pub mod polkadot_runtime_parachains_initializer; +pub mod polkadot_runtime_parachains_on_demand; +pub mod polkadot_runtime_parachains_paras; +pub mod polkadot_runtime_parachains_paras_inherent; +pub mod xcm; diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_asset_rate.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_asset_rate.rs new file mode 100644 index 0000000000000..aac73cb1b9033 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_asset_rate.rs @@ -0,0 +1,90 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. 
+ +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `pallet_asset_rate` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/westend-runtime/westend_runtime.wasm +// --pallet=pallet_asset_rate +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_asset_rate`. 
+pub struct WeightInfo(PhantomData); +impl pallet_asset_rate::WeightInfo for WeightInfo { + /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:1) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(1238), added: 3713, mode: `MaxEncodedLen`) + fn create() -> Weight { + // Proof Size summary in bytes: + // Measured: `142` + // Estimated: `4703` + // Minimum execution time: 13_141_000 picoseconds. + Weight::from_parts(13_549_000, 0) + .saturating_add(Weight::from_parts(0, 4703)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:1) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(1238), added: 3713, mode: `MaxEncodedLen`) + fn update() -> Weight { + // Proof Size summary in bytes: + // Measured: `210` + // Estimated: `4703` + // Minimum execution time: 16_979_000 picoseconds. + Weight::from_parts(17_951_000, 0) + .saturating_add(Weight::from_parts(0, 4703)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:1) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(1238), added: 3713, mode: `MaxEncodedLen`) + fn remove() -> Weight { + // Proof Size summary in bytes: + // Measured: `210` + // Estimated: `4703` + // Minimum execution time: 18_364_000 picoseconds. 
+ Weight::from_parts(19_135_000, 0) + .saturating_add(Weight::from_parts(0, 4703)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_balances.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_balances.rs new file mode 100644 index 0000000000000..0ee1b270cfabe --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_balances.rs @@ -0,0 +1,178 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `pallet_balances` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/westend-runtime/westend_runtime.wasm +// --pallet=pallet_balances +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_balances`. +pub struct WeightInfo(PhantomData); +impl pallet_balances::WeightInfo for WeightInfo { + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn transfer_allow_death() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `3593` + // Minimum execution time: 52_576_000 picoseconds. + Weight::from_parts(53_526_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn transfer_keep_alive() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `3593` + // Minimum execution time: 41_548_000 picoseconds. 
+ Weight::from_parts(42_237_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn force_set_balance_creating() -> Weight { + // Proof Size summary in bytes: + // Measured: `174` + // Estimated: `3593` + // Minimum execution time: 19_240_000 picoseconds. + Weight::from_parts(20_294_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn force_set_balance_killing() -> Weight { + // Proof Size summary in bytes: + // Measured: `174` + // Estimated: `3593` + // Minimum execution time: 27_274_000 picoseconds. + Weight::from_parts(28_742_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn force_transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `6196` + // Minimum execution time: 54_887_000 picoseconds. 
+ Weight::from_parts(56_002_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn transfer_all() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `3593` + // Minimum execution time: 51_421_000 picoseconds. + Weight::from_parts(52_519_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn force_unreserve() -> Weight { + // Proof Size summary in bytes: + // Measured: `174` + // Estimated: `3593` + // Minimum execution time: 22_311_000 picoseconds. + Weight::from_parts(23_026_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:999 w:999) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `u` is `[1, 1000]`. + fn upgrade_accounts(u: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0 + u * (136 ±0)` + // Estimated: `990 + u * (2603 ±0)` + // Minimum execution time: 18_042_000 picoseconds. 
+ Weight::from_parts(18_172_000, 0) + .saturating_add(Weight::from_parts(0, 990)) + // Standard Error: 11_937 + .saturating_add(Weight::from_parts(15_351_977, 0).saturating_mul(u.into())) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(u.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(u.into()))) + .saturating_add(Weight::from_parts(0, 2603).saturating_mul(u.into())) + } + fn force_adjust_total_issuance() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 6_666_000 picoseconds. + Weight::from_parts(7_042_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn burn_allow_death() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 32_955_000 picoseconds. + Weight::from_parts(33_340_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn burn_keep_alive() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 22_425_000 picoseconds. + Weight::from_parts(23_208_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_beefy_mmr.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_beefy_mmr.rs new file mode 100644 index 0000000000000..73debd9b3e8cc --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_beefy_mmr.rs @@ -0,0 +1,103 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `pallet_beefy_mmr` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/westend-runtime/westend_runtime.wasm +// --pallet=pallet_beefy_mmr +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_beefy_mmr`. +pub struct WeightInfo(PhantomData); +impl pallet_beefy_mmr::WeightInfo for WeightInfo { + /// The range of component `n` is `[2, 512]`. + fn n_leafs_proof_is_optimal(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 629_000 picoseconds. 
+ Weight::from_parts(1_215_800, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 67 + .saturating_add(Weight::from_parts(1_275, 0).saturating_mul(n.into())) + } + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn extract_validation_context() -> Weight { + // Proof Size summary in bytes: + // Measured: `68` + // Estimated: `3509` + // Minimum execution time: 9_629_000 picoseconds. + Weight::from_parts(10_234_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `Mmr::Nodes` (r:1 w:0) + /// Proof: `Mmr::Nodes` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + fn read_peak() -> Weight { + // Proof Size summary in bytes: + // Measured: `221` + // Estimated: `3505` + // Minimum execution time: 6_052_000 picoseconds. + Weight::from_parts(6_388_000, 0) + .saturating_add(Weight::from_parts(0, 3505)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `Mmr::RootHash` (r:1 w:0) + /// Proof: `Mmr::RootHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `Mmr::NumberOfLeaves` (r:1 w:0) + /// Proof: `Mmr::NumberOfLeaves` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// The range of component `n` is `[2, 512]`. + fn n_items_proof_is_non_canonical(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `213` + // Estimated: `1517` + // Minimum execution time: 12_197_000 picoseconds. 
+ Weight::from_parts(25_888_246, 0) + .saturating_add(Weight::from_parts(0, 1517)) + // Standard Error: 2_043 + .saturating_add(Weight::from_parts(1_304_917, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(2)) + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_conviction_voting.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_conviction_voting.rs new file mode 100644 index 0000000000000..4dcd5b7ae9305 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_conviction_voting.rs @@ -0,0 +1,204 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `pallet_conviction_voting` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/westend-runtime/westend_runtime.wasm +// --pallet=pallet_conviction_voting +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_conviction_voting`. +pub struct WeightInfo(PhantomData); +impl pallet_conviction_voting::WeightInfo for WeightInfo { + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(311), added: 2786, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn vote_new() -> Weight { + // Proof Size summary in bytes: + // Measured: `13408` 
+ // Estimated: `42428` + // Minimum execution time: 151_930_000 picoseconds. + Weight::from_parts(161_372_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(311), added: 2786, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + fn vote_existing() -> Weight { + // Proof Size summary in bytes: + // Measured: `14129` + // Estimated: `83866` + // Minimum execution time: 176_955_000 picoseconds. 
+ Weight::from_parts(185_290_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(7)) + } + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + fn remove_vote() -> Weight { + // Proof Size summary in bytes: + // Measured: `13918` + // Estimated: `83866` + // Minimum execution time: 141_988_000 picoseconds. + Weight::from_parts(149_871_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + fn remove_other_vote() -> Weight { + // Proof Size summary in bytes: + // Measured: `13005` + // Estimated: `30706` + // Minimum execution time: 79_917_000 picoseconds. 
+ Weight::from_parts(84_349_000, 0) + .saturating_add(Weight::from_parts(0, 30706)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `ConvictionVoting::VotingFor` (r:2 w:2) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:512 w:512) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(311), added: 2786, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:50) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + /// The range of component `r` is `[0, 512]`. + fn delegate(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `29603 + r * (365 ±0)` + // Estimated: `83866 + r * (3411 ±0)` + // Minimum execution time: 75_473_000 picoseconds. 
+ Weight::from_parts(873_424_384, 0) + .saturating_add(Weight::from_parts(0, 83866)) + // Standard Error: 60_903 + .saturating_add(Weight::from_parts(21_022_118, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().writes(45)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 3411).saturating_mul(r.into())) + } + /// Storage: `ConvictionVoting::VotingFor` (r:2 w:2) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:512 w:512) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:50) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + /// The range of component `r` is `[0, 512]`. + fn undelegate(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `29555 + r * (365 ±0)` + // Estimated: `83866 + r * (3411 ±0)` + // Minimum execution time: 47_752_000 picoseconds. 
+ Weight::from_parts(847_009_624, 0) + .saturating_add(Weight::from_parts(0, 83866)) + // Standard Error: 62_499 + .saturating_add(Weight::from_parts(21_293_933, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().writes(43)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 3411).saturating_mul(r.into())) + } + /// Storage: `ConvictionVoting::VotingFor` (r:1 w:1) + /// Proof: `ConvictionVoting::VotingFor` (`max_values`: None, `max_size`: Some(27241), added: 29716, mode: `MaxEncodedLen`) + /// Storage: `ConvictionVoting::ClassLocksFor` (r:1 w:1) + /// Proof: `ConvictionVoting::ClassLocksFor` (`max_values`: None, `max_size`: Some(311), added: 2786, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + fn unlock() -> Weight { + // Proof Size summary in bytes: + // Measured: `12181` + // Estimated: `30706` + // Minimum execution time: 107_409_000 picoseconds. + Weight::from_parts(114_301_000, 0) + .saturating_add(Weight::from_parts(0, 30706)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_identity.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_identity.rs new file mode 100644 index 0000000000000..e05a2b174d577 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_identity.rs @@ -0,0 +1,570 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. 
+ +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `pallet_identity` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/westend-runtime/westend_runtime.wasm +// --pallet=pallet_identity +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_identity`. 
+pub struct WeightInfo(PhantomData); +impl pallet_identity::WeightInfo for WeightInfo { + /// Storage: `Identity::Registrars` (r:1 w:1) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) + /// The range of component `r` is `[1, 19]`. + fn add_registrar(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `32 + r * (57 ±0)` + // Estimated: `2626` + // Minimum execution time: 13_290_000 picoseconds. + Weight::from_parts(13_987_666, 0) + .saturating_add(Weight::from_parts(0, 2626)) + // Standard Error: 1_986 + .saturating_add(Weight::from_parts(118_336, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// The range of component `r` is `[1, 20]`. + fn set_identity(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `6977 + r * (5 ±0)` + // Estimated: `11003` + // Minimum execution time: 118_150_000 picoseconds. + Weight::from_parts(120_003_906, 0) + .saturating_add(Weight::from_parts(0, 11003)) + // Standard Error: 11_614 + .saturating_add(Weight::from_parts(237_861, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:100 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// The range of component `s` is `[0, 100]`. 
+ fn set_subs_new(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `101` + // Estimated: `11003 + s * (2589 ±0)` + // Minimum execution time: 17_851_000 picoseconds. + Weight::from_parts(32_739_674, 0) + .saturating_add(Weight::from_parts(0, 11003)) + // Standard Error: 5_669 + .saturating_add(Weight::from_parts(3_789_127, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(s.into()))) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) + .saturating_add(Weight::from_parts(0, 2589).saturating_mul(s.into())) + } + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:0 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// The range of component `p` is `[0, 100]`. + fn set_subs_old(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `194 + p * (32 ±0)` + // Estimated: `11003` + // Minimum execution time: 17_896_000 picoseconds. 
+ Weight::from_parts(32_581_610, 0) + .saturating_add(Weight::from_parts(0, 11003)) + // Standard Error: 4_474 + .saturating_add(Weight::from_parts(1_511_949, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) + } + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:0 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// The range of component `r` is `[1, 20]`. + /// The range of component `s` is `[0, 100]`. + fn clear_identity(r: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `7069 + r * (5 ±0) + s * (32 ±0)` + // Estimated: `11003` + // Minimum execution time: 62_969_000 picoseconds. 
+ Weight::from_parts(63_397_496, 0) + .saturating_add(Weight::from_parts(0, 11003)) + // Standard Error: 17_822 + .saturating_add(Weight::from_parts(156_307, 0).saturating_mul(r.into())) + // Standard Error: 3_477 + .saturating_add(Weight::from_parts(1_468_191, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) + } + /// Storage: `Identity::Registrars` (r:1 w:0) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// The range of component `r` is `[1, 20]`. + fn request_judgement(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `6967 + r * (57 ±0)` + // Estimated: `11003` + // Minimum execution time: 85_331_000 picoseconds. + Weight::from_parts(87_149_610, 0) + .saturating_add(Weight::from_parts(0, 11003)) + // Standard Error: 7_682 + .saturating_add(Weight::from_parts(161_766, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// The range of component `r` is `[1, 20]`. + fn cancel_request(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `6998` + // Estimated: `11003` + // Minimum execution time: 82_868_000 picoseconds. 
+ Weight::from_parts(84_836_576, 0) + .saturating_add(Weight::from_parts(0, 11003)) + // Standard Error: 5_840 + .saturating_add(Weight::from_parts(111_417, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Identity::Registrars` (r:1 w:1) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) + /// The range of component `r` is `[1, 19]`. + fn set_fee(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `89 + r * (57 ±0)` + // Estimated: `2626` + // Minimum execution time: 9_998_000 picoseconds. + Weight::from_parts(10_559_773, 0) + .saturating_add(Weight::from_parts(0, 2626)) + // Standard Error: 1_526 + .saturating_add(Weight::from_parts(86_637, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Identity::Registrars` (r:1 w:1) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) + /// The range of component `r` is `[1, 19]`. + fn set_account_id(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `89 + r * (57 ±0)` + // Estimated: `2626` + // Minimum execution time: 10_427_000 picoseconds. + Weight::from_parts(10_967_798, 0) + .saturating_add(Weight::from_parts(0, 2626)) + // Standard Error: 1_516 + .saturating_add(Weight::from_parts(64_950, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Identity::Registrars` (r:1 w:1) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) + /// The range of component `r` is `[1, 19]`. 
+ fn set_fields(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `89 + r * (57 ±0)` + // Estimated: `2626` + // Minimum execution time: 10_049_000 picoseconds. + Weight::from_parts(10_518_238, 0) + .saturating_add(Weight::from_parts(0, 2626)) + // Standard Error: 1_133 + .saturating_add(Weight::from_parts(70_941, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Identity::Registrars` (r:1 w:0) + /// Proof: `Identity::Registrars` (`max_values`: Some(1), `max_size`: Some(1141), added: 1636, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// The range of component `r` is `[1, 19]`. + fn provide_judgement(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `7045 + r * (57 ±0)` + // Estimated: `11003` + // Minimum execution time: 106_329_000 picoseconds. 
+ Weight::from_parts(108_408_384, 0) + .saturating_add(Weight::from_parts(0, 11003)) + // Standard Error: 9_587 + .saturating_add(Weight::from_parts(73_218, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:0 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// The range of component `r` is `[1, 20]`. + /// The range of component `s` is `[0, 100]`. + fn kill_identity(r: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `7276 + r * (5 ±0) + s * (32 ±0)` + // Estimated: `11003` + // Minimum execution time: 68_530_000 picoseconds. 
+ Weight::from_parts(71_229_661, 0) + .saturating_add(Weight::from_parts(0, 11003)) + // Standard Error: 18_380 + .saturating_add(Weight::from_parts(193_976, 0).saturating_mul(r.into())) + // Standard Error: 3_586 + .saturating_add(Weight::from_parts(1_446_685, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) + } + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:1 w:1) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// The range of component `s` is `[0, 99]`. + fn add_sub(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `475 + s * (36 ±0)` + // Estimated: `11003` + // Minimum execution time: 34_407_000 picoseconds. + Weight::from_parts(39_732_661, 0) + .saturating_add(Weight::from_parts(0, 11003)) + // Standard Error: 1_673 + .saturating_add(Weight::from_parts(117_300, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:1 w:1) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// The range of component `s` is `[1, 100]`. 
+ fn rename_sub(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `591 + s * (3 ±0)` + // Estimated: `11003` + // Minimum execution time: 21_851_000 picoseconds. + Weight::from_parts(24_585_489, 0) + .saturating_add(Weight::from_parts(0, 11003)) + // Standard Error: 789 + .saturating_add(Weight::from_parts(70_051, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Identity::IdentityOf` (r:1 w:0) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Storage: `Identity::SuperOf` (r:1 w:1) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// The range of component `s` is `[1, 100]`. + fn remove_sub(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `638 + s * (35 ±0)` + // Estimated: `11003` + // Minimum execution time: 38_427_000 picoseconds. + Weight::from_parts(42_500_800, 0) + .saturating_add(Weight::from_parts(0, 11003)) + // Standard Error: 1_267 + .saturating_add(Weight::from_parts(95_006, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Identity::SuperOf` (r:1 w:1) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `s` is `[0, 99]`. 
+ fn quit_sub(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `704 + s * (37 ±0)` + // Estimated: `6723` + // Minimum execution time: 29_425_000 picoseconds. + Weight::from_parts(32_022_317, 0) + .saturating_add(Weight::from_parts(0, 6723)) + // Standard Error: 928 + .saturating_add(Weight::from_parts(106_141, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Identity::AuthorityOf` (r:0 w:1) + /// Proof: `Identity::AuthorityOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn add_username_authority() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_523_000 picoseconds. + Weight::from_parts(7_722_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Identity::AuthorityOf` (r:1 w:1) + /// Proof: `Identity::AuthorityOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn remove_username_authority() -> Weight { + // Proof Size summary in bytes: + // Measured: `79` + // Estimated: `3517` + // Minimum execution time: 14_255_000 picoseconds. 
+ Weight::from_parts(15_084_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Identity::AuthorityOf` (r:1 w:1) + /// Proof: `Identity::AuthorityOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `Identity::UsernameInfoOf` (r:1 w:1) + /// Proof: `Identity::UsernameInfoOf` (`max_values`: None, `max_size`: Some(98), added: 2573, mode: `MaxEncodedLen`) + /// Storage: `Identity::PendingUsernames` (r:1 w:0) + /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(102), added: 2577, mode: `MaxEncodedLen`) + /// Storage: `Identity::UsernameOf` (r:1 w:1) + /// Proof: `Identity::UsernameOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `p` is `[0, 1]`. + fn set_username_for(_p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `219` + // Estimated: `3593` + // Minimum execution time: 71_739_000 picoseconds. 
+ Weight::from_parts(94_823_220, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Identity::PendingUsernames` (r:1 w:1) + /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(102), added: 2577, mode: `MaxEncodedLen`) + /// Storage: `Identity::UsernameOf` (r:1 w:1) + /// Proof: `Identity::UsernameOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Identity::UsernameInfoOf` (r:0 w:1) + /// Proof: `Identity::UsernameInfoOf` (`max_values`: None, `max_size`: Some(98), added: 2573, mode: `MaxEncodedLen`) + fn accept_username() -> Weight { + // Proof Size summary in bytes: + // Measured: `116` + // Estimated: `3567` + // Minimum execution time: 25_721_000 picoseconds. + Weight::from_parts(26_422_000, 0) + .saturating_add(Weight::from_parts(0, 3567)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Identity::PendingUsernames` (r:1 w:1) + /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(102), added: 2577, mode: `MaxEncodedLen`) + /// Storage: `Identity::AuthorityOf` (r:1 w:0) + /// Proof: `Identity::AuthorityOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `p` is `[0, 1]`. + fn remove_expired_approval(_p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `347` + // Estimated: `3593` + // Minimum execution time: 25_492_000 picoseconds. 
+ Weight::from_parts(57_463_718, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Identity::UsernameInfoOf` (r:1 w:0) + /// Proof: `Identity::UsernameInfoOf` (`max_values`: None, `max_size`: Some(98), added: 2573, mode: `MaxEncodedLen`) + /// Storage: `Identity::UsernameOf` (r:0 w:1) + /// Proof: `Identity::UsernameOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + fn set_primary_username() -> Weight { + // Proof Size summary in bytes: + // Measured: `172` + // Estimated: `3563` + // Minimum execution time: 17_653_000 picoseconds. + Weight::from_parts(18_274_000, 0) + .saturating_add(Weight::from_parts(0, 3563)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Identity::UsernameInfoOf` (r:1 w:0) + /// Proof: `Identity::UsernameInfoOf` (`max_values`: None, `max_size`: Some(98), added: 2573, mode: `MaxEncodedLen`) + /// Storage: `Identity::AuthorityOf` (r:1 w:0) + /// Proof: `Identity::AuthorityOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `Identity::UnbindingUsernames` (r:1 w:1) + /// Proof: `Identity::UnbindingUsernames` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) + fn unbind_username() -> Weight { + // Proof Size summary in bytes: + // Measured: `236` + // Estimated: `3563` + // Minimum execution time: 22_190_000 picoseconds. 
+ Weight::from_parts(23_093_000, 0) + .saturating_add(Weight::from_parts(0, 3563)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Identity::UnbindingUsernames` (r:1 w:1) + /// Proof: `Identity::UnbindingUsernames` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) + /// Storage: `Identity::UsernameInfoOf` (r:1 w:1) + /// Proof: `Identity::UsernameInfoOf` (`max_values`: None, `max_size`: Some(98), added: 2573, mode: `MaxEncodedLen`) + /// Storage: `Identity::UsernameOf` (r:1 w:1) + /// Proof: `Identity::UsernameOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Identity::AuthorityOf` (r:1 w:0) + /// Proof: `Identity::AuthorityOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn remove_username() -> Weight { + // Proof Size summary in bytes: + // Measured: `297` + // Estimated: `3563` + // Minimum execution time: 27_024_000 picoseconds. 
+ Weight::from_parts(28_770_000, 0) + .saturating_add(Weight::from_parts(0, 3563)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Identity::UsernameInfoOf` (r:1 w:1) + /// Proof: `Identity::UsernameInfoOf` (`max_values`: None, `max_size`: Some(98), added: 2573, mode: `MaxEncodedLen`) + /// Storage: `Identity::UsernameOf` (r:1 w:1) + /// Proof: `Identity::UsernameOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Identity::UnbindingUsernames` (r:1 w:1) + /// Proof: `Identity::UnbindingUsernames` (`max_values`: None, `max_size`: Some(53), added: 2528, mode: `MaxEncodedLen`) + /// Storage: `Identity::AuthorityOf` (r:1 w:0) + /// Proof: `Identity::AuthorityOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `p` is `[0, 1]`. + fn kill_username(_p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `508` + // Estimated: `3593` + // Minimum execution time: 24_819_000 picoseconds. + Weight::from_parts(49_501_024, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: UNKNOWN KEY `0x2aeddc77fe58c98d50bd37f1b90840f99622d1423cdd16f5c33e2b531c34a53d` (r:2 w:0) + /// Proof: UNKNOWN KEY `0x2aeddc77fe58c98d50bd37f1b90840f99622d1423cdd16f5c33e2b531c34a53d` (r:2 w:0) + /// Storage: `Identity::AuthorityOf` (r:0 w:1) + /// Proof: `Identity::AuthorityOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn migration_v2_authority_step() -> Weight { + // Proof Size summary in bytes: + // Measured: `147` + // Estimated: `6087` + // Minimum execution time: 9_126_000 picoseconds. 
+ Weight::from_parts(9_633_000, 0) + .saturating_add(Weight::from_parts(0, 6087)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: UNKNOWN KEY `0x2aeddc77fe58c98d50bd37f1b90840f97c182fead9255863460affdd63116be3` (r:2 w:0) + /// Proof: UNKNOWN KEY `0x2aeddc77fe58c98d50bd37f1b90840f97c182fead9255863460affdd63116be3` (r:2 w:0) + /// Storage: `Identity::UsernameInfoOf` (r:0 w:1) + /// Proof: `Identity::UsernameInfoOf` (`max_values`: None, `max_size`: Some(98), added: 2573, mode: `MaxEncodedLen`) + fn migration_v2_username_step() -> Weight { + // Proof Size summary in bytes: + // Measured: `159` + // Estimated: `6099` + // Minimum execution time: 8_795_000 picoseconds. + Weight::from_parts(9_249_000, 0) + .saturating_add(Weight::from_parts(0, 6099)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Identity::IdentityOf` (r:2 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Storage: `Identity::UsernameOf` (r:0 w:1) + /// Proof: `Identity::UsernameOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + fn migration_v2_identity_step() -> Weight { + // Proof Size summary in bytes: + // Measured: `7062` + // Estimated: `21016` + // Minimum execution time: 66_496_000 picoseconds. + Weight::from_parts(67_500_000, 0) + .saturating_add(Weight::from_parts(0, 21016)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Identity::PendingUsernames` (r:2 w:1) + /// Proof: `Identity::PendingUsernames` (`max_values`: None, `max_size`: Some(102), added: 2577, mode: `MaxEncodedLen`) + fn migration_v2_pending_username_step() -> Weight { + // Proof Size summary in bytes: + // Measured: `201` + // Estimated: `6144` + // Minimum execution time: 11_523_000 picoseconds. 
+ Weight::from_parts(12_151_000, 0) + .saturating_add(Weight::from_parts(0, 6144)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Identity::AuthorityOf` (r:2 w:0) + /// Proof: `Identity::AuthorityOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x2aeddc77fe58c98d50bd37f1b90840f99622d1423cdd16f5c33e2b531c34a53d` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x2aeddc77fe58c98d50bd37f1b90840f99622d1423cdd16f5c33e2b531c34a53d` (r:1 w:1) + fn migration_v2_cleanup_authority_step() -> Weight { + // Proof Size summary in bytes: + // Measured: `288` + // Estimated: `6044` + // Minimum execution time: 15_382_000 picoseconds. + Weight::from_parts(15_984_000, 0) + .saturating_add(Weight::from_parts(0, 6044)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Identity::UsernameInfoOf` (r:2 w:0) + /// Proof: `Identity::UsernameInfoOf` (`max_values`: None, `max_size`: Some(98), added: 2573, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x2aeddc77fe58c98d50bd37f1b90840f97c182fead9255863460affdd63116be3` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x2aeddc77fe58c98d50bd37f1b90840f97c182fead9255863460affdd63116be3` (r:1 w:1) + fn migration_v2_cleanup_username_step() -> Weight { + // Proof Size summary in bytes: + // Measured: `290` + // Estimated: `6136` + // Minimum execution time: 14_213_000 picoseconds. 
+ Weight::from_parts(14_935_000, 0) + .saturating_add(Weight::from_parts(0, 6136)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_indices.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_indices.rs new file mode 100644 index 0000000000000..4da92b82a004b --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_indices.rs @@ -0,0 +1,134 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `pallet_indices` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/westend-runtime/westend_runtime.wasm +// --pallet=pallet_indices +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_indices`. +pub struct WeightInfo(PhantomData); +impl pallet_indices::WeightInfo for WeightInfo { + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + fn poke_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `487` + // Estimated: `5698` + // Minimum execution time: 47_219_000 picoseconds. + Weight::from_parts(48_694_000, 0) + .saturating_add(Weight::from_parts(0, 5698)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Indices::Accounts` (r:1 w:1) + /// Proof: `Indices::Accounts` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + fn claim() -> Weight { + // Proof Size summary in bytes: + // Measured: `4` + // Estimated: `3534` + // Minimum execution time: 26_421_000 picoseconds. 
+ Weight::from_parts(27_302_000, 0) + .saturating_add(Weight::from_parts(0, 3534)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Indices::Accounts` (r:1 w:1) + /// Proof: `Indices::Accounts` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `203` + // Estimated: `3593` + // Minimum execution time: 39_565_000 picoseconds. + Weight::from_parts(40_741_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Indices::Accounts` (r:1 w:1) + /// Proof: `Indices::Accounts` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + fn free() -> Weight { + // Proof Size summary in bytes: + // Measured: `100` + // Estimated: `3534` + // Minimum execution time: 28_046_000 picoseconds. + Weight::from_parts(28_775_000, 0) + .saturating_add(Weight::from_parts(0, 3534)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Indices::Accounts` (r:1 w:1) + /// Proof: `Indices::Accounts` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn force_transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `203` + // Estimated: `3593` + // Minimum execution time: 29_411_000 picoseconds. 
+ Weight::from_parts(30_698_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Indices::Accounts` (r:1 w:1) + /// Proof: `Indices::Accounts` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) + fn freeze() -> Weight { + // Proof Size summary in bytes: + // Measured: `100` + // Estimated: `3534` + // Minimum execution time: 30_839_000 picoseconds. + Weight::from_parts(31_621_000, 0) + .saturating_add(Weight::from_parts(0, 3534)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_message_queue.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_message_queue.rs new file mode 100644 index 0000000000000..8cd96bc810c42 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_message_queue.rs @@ -0,0 +1,213 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `pallet_message_queue` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! 
HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/westend-runtime/westend_runtime.wasm +// --pallet=pallet_message_queue +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_message_queue`. +pub struct WeightInfo(PhantomData); +impl pallet_message_queue::WeightInfo for WeightInfo { + /// Storage: `MessageQueue::ServiceHead` (r:1 w:0) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(6), added: 501, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:2 w:2) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + fn ready_ring_knit() -> Weight { + // Proof Size summary in bytes: + // Measured: `281` + // Estimated: `6050` + // Minimum execution time: 16_991_000 picoseconds. 
+ Weight::from_parts(17_813_000, 0) + .saturating_add(Weight::from_parts(0, 6050)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `MessageQueue::BookStateFor` (r:2 w:2) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(6), added: 501, mode: `MaxEncodedLen`) + fn ready_ring_unknit() -> Weight { + // Proof Size summary in bytes: + // Measured: `281` + // Estimated: `6050` + // Minimum execution time: 16_268_000 picoseconds. + Weight::from_parts(16_659_000, 0) + .saturating_add(Weight::from_parts(0, 6050)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + fn service_queue_base() -> Weight { + // Proof Size summary in bytes: + // Measured: `42` + // Estimated: `3520` + // Minimum execution time: 4_901_000 picoseconds. + Weight::from_parts(5_262_000, 0) + .saturating_add(Weight::from_parts(0, 3520)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(131122), added: 133597, mode: `MaxEncodedLen`) + fn service_page_base_completion() -> Weight { + // Proof Size summary in bytes: + // Measured: `115` + // Estimated: `134587` + // Minimum execution time: 10_587_000 picoseconds. 
+ Weight::from_parts(11_040_000, 0) + .saturating_add(Weight::from_parts(0, 134587)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(131122), added: 133597, mode: `MaxEncodedLen`) + fn service_page_base_no_completion() -> Weight { + // Proof Size summary in bytes: + // Measured: `115` + // Estimated: `134587` + // Minimum execution time: 10_729_000 picoseconds. + Weight::from_parts(11_263_000, 0) + .saturating_add(Weight::from_parts(0, 134587)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `MessageQueue::BookStateFor` (r:0 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(131122), added: 133597, mode: `MaxEncodedLen`) + fn service_page_item() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 381_300_000 picoseconds. + Weight::from_parts(390_220_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(6), added: 501, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + fn bump_service_head() -> Weight { + // Proof Size summary in bytes: + // Measured: `220` + // Estimated: `3520` + // Minimum execution time: 12_503_000 picoseconds. 
+ Weight::from_parts(12_909_000, 0) + .saturating_add(Weight::from_parts(0, 3520)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:0 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(6), added: 501, mode: `MaxEncodedLen`) + fn set_service_head() -> Weight { + // Proof Size summary in bytes: + // Measured: `220` + // Estimated: `3520` + // Minimum execution time: 11_071_000 picoseconds. + Weight::from_parts(11_553_000, 0) + .saturating_add(Weight::from_parts(0, 3520)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(131122), added: 133597, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) + /// Storage: UNKNOWN KEY `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) + /// Proof: UNKNOWN KEY `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) + fn reap_page() -> Weight { + // Proof Size summary in bytes: + // Measured: `131252` + // Estimated: `134587` + // Minimum execution time: 162_710_000 picoseconds. 
+ Weight::from_parts(166_261_000, 0) + .saturating_add(Weight::from_parts(0, 134587)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(131122), added: 133597, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) + /// Storage: UNKNOWN KEY `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) + /// Proof: UNKNOWN KEY `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) + fn execute_overweight_page_removed() -> Weight { + // Proof Size summary in bytes: + // Measured: `131252` + // Estimated: `134587` + // Minimum execution time: 200_138_000 picoseconds. 
+ Weight::from_parts(210_177_000, 0) + .saturating_add(Weight::from_parts(0, 134587)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(131122), added: 133597, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) + /// Storage: UNKNOWN KEY `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) + /// Proof: UNKNOWN KEY `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) + fn execute_overweight_page_updated() -> Weight { + // Proof Size summary in bytes: + // Measured: `131252` + // Estimated: `134587` + // Minimum execution time: 275_951_000 picoseconds. + Weight::from_parts(284_857_000, 0) + .saturating_add(Weight::from_parts(0, 134587)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(4)) + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_migrations.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_migrations.rs new file mode 100644 index 0000000000000..1f365e9e1ca3b --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_migrations.rs @@ -0,0 +1,225 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `pallet_migrations` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/westend-runtime/westend_runtime.wasm +// --pallet=pallet_migrations +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_migrations`. 
+pub struct WeightInfo(PhantomData); +impl pallet_migrations::WeightInfo for WeightInfo { + /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + fn onboard_new_mbms() -> Weight { + // Proof Size summary in bytes: + // Measured: `133` + // Estimated: `67035` + // Minimum execution time: 8_277_000 picoseconds. + Weight::from_parts(8_720_000, 0) + .saturating_add(Weight::from_parts(0, 67035)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn progress_mbms_none() -> Weight { + // Proof Size summary in bytes: + // Measured: `4` + // Estimated: `67035` + // Minimum execution time: 2_126_000 picoseconds. + Weight::from_parts(2_267_000, 0) + .saturating_add(Weight::from_parts(0, 67035)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn exec_migration_completed() -> Weight { + // Proof Size summary in bytes: + // Measured: `129` + // Estimated: `3594` + // Minimum execution time: 6_651_000 picoseconds. 
+ Weight::from_parts(6_996_000, 0) + .saturating_add(Weight::from_parts(0, 3594)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + fn exec_migration_skipped_historic() -> Weight { + // Proof Size summary in bytes: + // Measured: `187` + // Estimated: `3731` + // Minimum execution time: 15_181_000 picoseconds. + Weight::from_parts(15_509_000, 0) + .saturating_add(Weight::from_parts(0, 3731)) + .saturating_add(T::DbWeight::get().reads(2)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + fn exec_migration_advance() -> Weight { + // Proof Size summary in bytes: + // Measured: `133` + // Estimated: `3731` + // Minimum execution time: 11_200_000 picoseconds. + Weight::from_parts(11_718_000, 0) + .saturating_add(Weight::from_parts(0, 3731)) + .saturating_add(T::DbWeight::get().reads(2)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:1) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + fn exec_migration_complete() -> Weight { + // Proof Size summary in bytes: + // Measured: `133` + // Estimated: `3731` + // Minimum execution time: 12_857_000 picoseconds. 
+ Weight::from_parts(13_172_000, 0) + .saturating_add(Weight::from_parts(0, 3731)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Storage: `MultiBlockMigrations::Historic` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn exec_migration_fail() -> Weight { + // Proof Size summary in bytes: + // Measured: `133` + // Estimated: `3731` + // Minimum execution time: 13_892_000 picoseconds. + Weight::from_parts(14_323_000, 0) + .saturating_add(Weight::from_parts(0, 3731)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn on_init_loop() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 190_000 picoseconds. + Weight::from_parts(230_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn force_set_cursor() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_820_000 picoseconds. 
+ Weight::from_parts(2_944_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `MultiBlockMigrations::Cursor` (r:0 w:1) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + fn force_set_active_cursor() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_222_000 picoseconds. + Weight::from_parts(3_478_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `MultiBlockMigrations::Cursor` (r:1 w:0) + /// Proof: `MultiBlockMigrations::Cursor` (`max_values`: Some(1), `max_size`: Some(65550), added: 66045, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x583359fe0e84d953a9dd84e8addb08a5` (r:1 w:0) + fn force_onboard_mbms() -> Weight { + // Proof Size summary in bytes: + // Measured: `109` + // Estimated: `67035` + // Minimum execution time: 5_853_000 picoseconds. + Weight::from_parts(6_097_000, 0) + .saturating_add(Weight::from_parts(0, 67035)) + .saturating_add(T::DbWeight::get().reads(2)) + } + /// Storage: `MultiBlockMigrations::Historic` (r:256 w:256) + /// Proof: `MultiBlockMigrations::Historic` (`max_values`: None, `max_size`: Some(266), added: 2741, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 256]`. + fn clear_historic(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `984 + n * (271 ±0)` + // Estimated: `3834 + n * (2740 ±0)` + // Minimum execution time: 20_676_000 picoseconds. 
+ Weight::from_parts(19_067_906, 0) + .saturating_add(Weight::from_parts(0, 3834)) + // Standard Error: 3_366 + .saturating_add(Weight::from_parts(1_469_069, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2740).saturating_mul(n.into())) + } + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `n` is `[0, 2048]`. + fn reset_pallet_migration(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1676 + n * (38 ±0)` + // Estimated: `754 + n * (39 ±0)` + // Minimum execution time: 1_916_000 picoseconds. + Weight::from_parts(2_009_000, 0) + .saturating_add(Weight::from_parts(0, 754)) + // Standard Error: 798 + .saturating_add(Weight::from_parts(880_656, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 39).saturating_mul(n.into())) + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_mmr.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_mmr.rs new file mode 100644 index 0000000000000..52ae662e0ddcc --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_mmr.rs @@ -0,0 +1,82 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `pallet_mmr` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/westend-runtime/westend_runtime.wasm +// --pallet=pallet_mmr +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_mmr`. 
+pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> pallet_mmr::WeightInfo for WeightInfo<T> { + /// Storage: `Mmr::NumberOfLeaves` (r:1 w:1) + /// Proof: `Mmr::NumberOfLeaves` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `System::ParentHash` (r:1 w:0) + /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `Paras::Heads` (r:1025 w:0) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `BeefyMmrLeaf::BeefyNextAuthorities` (r:1 w:0) + /// Proof: `BeefyMmrLeaf::BeefyNextAuthorities` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) + /// Storage: `Mmr::Nodes` (r:7 w:1) + /// Proof: `Mmr::Nodes` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Mmr::UseLocalStorage` (r:1 w:0) + /// Proof: `Mmr::UseLocalStorage` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `Mmr::RootHash` (r:0 w:1) + /// Proof: `Mmr::RootHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// The range of component `x` is `[1, 1000]`. + fn on_initialize(x: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1071075` + // Estimated: `3608924 + x * (8 ±0)` + // Minimum execution time: 9_274_662_000 picoseconds. 
+ Weight::from_parts(9_403_577_378, 0) + .saturating_add(Weight::from_parts(0, 3608924)) + // Standard Error: 3_804 + .saturating_add(Weight::from_parts(97_770, 0).saturating_mul(x.into())) + .saturating_add(T::DbWeight::get().reads(1032)) + .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(Weight::from_parts(0, 8).saturating_mul(x.into())) + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_multisig.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_multisig.rs new file mode 100644 index 0000000000000..68ea51a2da00d --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_multisig.rs @@ -0,0 +1,169 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `pallet_multisig` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/westend-runtime/westend_runtime.wasm +// --pallet=pallet_multisig +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_multisig`. +pub struct WeightInfo(PhantomData); +impl pallet_multisig::WeightInfo for WeightInfo { + fn poke_deposit(_s: u32, ) -> Weight { + Default::default() + } + /// The range of component `z` is `[0, 10000]`. + fn as_multi_threshold_1(z: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 15_742_000 picoseconds. + Weight::from_parts(16_689_158, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 4 + .saturating_add(Weight::from_parts(425, 0).saturating_mul(z.into())) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) + /// The range of component `s` is `[2, 100]`. + /// The range of component `z` is `[0, 10000]`. + fn as_multi_create(s: u32, z: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `267 + s * (2 ±0)` + // Estimated: `6811` + // Minimum execution time: 51_695_000 picoseconds. 
+ Weight::from_parts(39_502_473, 0) + .saturating_add(Weight::from_parts(0, 6811)) + // Standard Error: 1_991 + .saturating_add(Weight::from_parts(149_722, 0).saturating_mul(s.into())) + // Standard Error: 19 + .saturating_add(Weight::from_parts(1_920, 0).saturating_mul(z.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) + /// The range of component `s` is `[3, 100]`. + /// The range of component `z` is `[0, 10000]`. + fn as_multi_approve(s: u32, z: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `286` + // Estimated: `6811` + // Minimum execution time: 36_027_000 picoseconds. + Weight::from_parts(23_708_974, 0) + .saturating_add(Weight::from_parts(0, 6811)) + // Standard Error: 886 + .saturating_add(Weight::from_parts(135_578, 0).saturating_mul(s.into())) + // Standard Error: 8 + .saturating_add(Weight::from_parts(1_977, 0).saturating_mul(z.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `s` is `[2, 100]`. + /// The range of component `z` is `[0, 10000]`. + fn as_multi_complete(s: u32, z: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `392 + s * (33 ±0)` + // Estimated: `6811` + // Minimum execution time: 57_964_000 picoseconds. 
+ Weight::from_parts(41_322_769, 0) + .saturating_add(Weight::from_parts(0, 6811)) + // Standard Error: 1_938 + .saturating_add(Weight::from_parts(187_486, 0).saturating_mul(s.into())) + // Standard Error: 18 + .saturating_add(Weight::from_parts(2_132, 0).saturating_mul(z.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) + /// The range of component `s` is `[2, 100]`. + fn approve_as_multi_create(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `267 + s * (2 ±0)` + // Estimated: `6811` + // Minimum execution time: 35_875_000 picoseconds. + Weight::from_parts(37_310_784, 0) + .saturating_add(Weight::from_parts(0, 6811)) + // Standard Error: 1_591 + .saturating_add(Weight::from_parts(162_975, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) + /// The range of component `s` is `[2, 100]`. + fn approve_as_multi_approve(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `286` + // Estimated: `6811` + // Minimum execution time: 21_526_000 picoseconds. + Weight::from_parts(22_387_339, 0) + .saturating_add(Weight::from_parts(0, 6811)) + // Standard Error: 706 + .saturating_add(Weight::from_parts(146_192, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) + /// The range of component `s` is `[2, 100]`. 
+ fn cancel_as_multi(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `458 + s * (1 ±0)` + // Estimated: `6811` + // Minimum execution time: 36_493_000 picoseconds. + Weight::from_parts(38_162_969, 0) + .saturating_add(Weight::from_parts(0, 6811)) + // Standard Error: 1_164 + .saturating_add(Weight::from_parts(153_723, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_offences.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_offences.rs new file mode 100644 index 0000000000000..f656cdb6754d1 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_offences.rs @@ -0,0 +1,124 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `pallet_offences` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/westend-runtime/westend_runtime.wasm +// --pallet=pallet_offences +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_offences`. +pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> pallet_offences::WeightInfo for WeightInfo<T> { + /// Storage: `Offences::ConcurrentReportsIndex` (r:1 w:1) + /// Proof: `Offences::ConcurrentReportsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Offences::Reports` (r:1 w:1) + /// Proof: `Offences::Reports` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ActiveEra` (r:1 w:0) + /// Proof: `Staking::ActiveEra` (`max_values`: Some(1), `max_size`: Some(13), added: 508, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStartSessionIndex` (r:1 w:0) + /// Proof: `Staking::ErasStartSessionIndex` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Staking::Invulnerables` (r:1 w:0) + /// Proof: `Staking::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStakersOverview` (r:1 w:0) + /// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) + /// Storage: `Session::DisabledValidators` (r:1 w:1) + /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: 
None, mode: `Measured`) + /// Storage: `Session::Validators` (r:1 w:0) + /// Proof: `Session::Validators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ValidatorSlashInEra` (r:1 w:1) + /// Proof: `Staking::ValidatorSlashInEra` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::OffenceQueue` (r:1 w:1) + /// Proof: `Staking::OffenceQueue` (`max_values`: None, `max_size`: Some(101), added: 2576, mode: `MaxEncodedLen`) + /// Storage: `Staking::OffenceQueueEras` (r:1 w:1) + /// Proof: `Staking::OffenceQueueEras` (`max_values`: Some(1), `max_size`: Some(9), added: 504, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 16]`. + fn report_offence_grandpa(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `987` + // Estimated: `4452` + // Minimum execution time: 59_530_000 picoseconds. + Weight::from_parts(63_406_319, 0) + .saturating_add(Weight::from_parts(0, 4452)) + // Standard Error: 9_603 + .saturating_add(Weight::from_parts(530_243, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(11)) + .saturating_add(T::DbWeight::get().writes(6)) + } + /// Storage: `Offences::ConcurrentReportsIndex` (r:1 w:1) + /// Proof: `Offences::ConcurrentReportsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Offences::Reports` (r:1 w:1) + /// Proof: `Offences::Reports` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ActiveEra` (r:1 w:0) + /// Proof: `Staking::ActiveEra` (`max_values`: Some(1), `max_size`: Some(13), added: 508, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStartSessionIndex` (r:1 w:0) + /// Proof: `Staking::ErasStartSessionIndex` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Staking::Invulnerables` (r:1 w:0) + /// Proof: `Staking::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: 
`MaxEncodedLen`) + /// Storage: `Staking::ErasStakersOverview` (r:1 w:0) + /// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) + /// Storage: `Session::DisabledValidators` (r:1 w:1) + /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Session::Validators` (r:1 w:0) + /// Proof: `Session::Validators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ValidatorSlashInEra` (r:1 w:1) + /// Proof: `Staking::ValidatorSlashInEra` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::OffenceQueue` (r:1 w:1) + /// Proof: `Staking::OffenceQueue` (`max_values`: None, `max_size`: Some(101), added: 2576, mode: `MaxEncodedLen`) + /// Storage: `Staking::OffenceQueueEras` (r:1 w:1) + /// Proof: `Staking::OffenceQueueEras` (`max_values`: Some(1), `max_size`: Some(9), added: 504, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 16]`. + fn report_offence_babe(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `987` + // Estimated: `4452` + // Minimum execution time: 59_407_000 picoseconds. + Weight::from_parts(62_842_539, 0) + .saturating_add(Weight::from_parts(0, 4452)) + // Standard Error: 7_722 + .saturating_add(Weight::from_parts(546_569, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(11)) + .saturating_add(T::DbWeight::get().writes(6)) + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_parameters.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_parameters.rs new file mode 100644 index 0000000000000..95544527bdebe --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_parameters.rs @@ -0,0 +1,66 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. 
+ +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `pallet_parameters` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/westend-runtime/westend_runtime.wasm +// --pallet=pallet_parameters +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_parameters`. 
+pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> pallet_parameters::WeightInfo for WeightInfo<T> { + /// Storage: `Parameters::Parameters` (r:1 w:1) + /// Proof: `Parameters::Parameters` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + fn set_parameter() -> Weight { + // Proof Size summary in bytes: + // Measured: `4` + // Estimated: `3493` + // Minimum execution time: 8_918_000 picoseconds. + Weight::from_parts(9_567_000, 0) + .saturating_add(Weight::from_parts(0, 3493)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_preimage.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_preimage.rs new file mode 100644 index 0000000000000..d907f2f93199c --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_preimage.rs @@ -0,0 +1,269 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see <https://www.gnu.org/licenses/>. + +//! Autogenerated weights for `pallet_preimage` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/westend-runtime/westend_runtime.wasm +// --pallet=pallet_preimage +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_preimage`. +pub struct WeightInfo(PhantomData); +impl pallet_preimage::WeightInfo for WeightInfo { + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) + /// The range of component `s` is `[0, 4194304]`. + fn note_preimage(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `97` + // Estimated: `3568` + // Minimum execution time: 53_384_000 picoseconds. 
+ Weight::from_parts(54_357_000, 0) + .saturating_add(Weight::from_parts(0, 3568)) + // Standard Error: 169 + .saturating_add(Weight::from_parts(17_382, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) + /// The range of component `s` is `[0, 4194304]`. + fn note_requested_preimage(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `106` + // Estimated: `3556` + // Minimum execution time: 21_147_000 picoseconds. + Weight::from_parts(21_420_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + // Standard Error: 169 + .saturating_add(Weight::from_parts(17_415, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) + /// The range of component `s` is `[0, 4194304]`. 
+ fn note_no_deposit_preimage(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `106` + // Estimated: `3556` + // Minimum execution time: 20_034_000 picoseconds. + Weight::from_parts(20_458_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + // Standard Error: 170 + .saturating_add(Weight::from_parts(17_397, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) + fn unnote_preimage() -> Weight { + // Proof Size summary in bytes: + // Measured: `281` + // Estimated: `3568` + // Minimum execution time: 72_904_000 picoseconds. 
+ Weight::from_parts(75_167_000, 0) + .saturating_add(Weight::from_parts(0, 3568)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) + fn unnote_no_deposit_preimage() -> Weight { + // Proof Size summary in bytes: + // Measured: `144` + // Estimated: `3556` + // Minimum execution time: 43_235_000 picoseconds. + Weight::from_parts(47_464_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + fn request_preimage() -> Weight { + // Proof Size summary in bytes: + // Measured: `188` + // Estimated: `3556` + // Minimum execution time: 35_314_000 picoseconds. 
+ Weight::from_parts(38_994_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + fn request_no_deposit_preimage() -> Weight { + // Proof Size summary in bytes: + // Measured: `144` + // Estimated: `3556` + // Minimum execution time: 32_514_000 picoseconds. + Weight::from_parts(34_566_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + fn request_unnoted_preimage() -> Weight { + // Proof Size summary in bytes: + // Measured: `42` + // Estimated: `3556` + // Minimum execution time: 24_339_000 picoseconds. 
+ Weight::from_parts(26_465_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + fn request_requested_preimage() -> Weight { + // Proof Size summary in bytes: + // Measured: `106` + // Estimated: `3556` + // Minimum execution time: 24_256_000 picoseconds. + Weight::from_parts(25_363_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) + fn unrequest_preimage() -> Weight { + // Proof Size summary in bytes: + // Measured: `144` + // Estimated: `3556` + // Minimum execution time: 42_454_000 picoseconds. 
+ Weight::from_parts(46_055_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + fn unrequest_unnoted_preimage() -> Weight { + // Proof Size summary in bytes: + // Measured: `106` + // Estimated: `3556` + // Minimum execution time: 24_498_000 picoseconds. + Weight::from_parts(26_934_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + fn unrequest_multi_referenced_preimage() -> Weight { + // Proof Size summary in bytes: + // Measured: `106` + // Estimated: `3556` + // Minimum execution time: 24_328_000 picoseconds. 
+ Weight::from_parts(25_802_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Preimage::StatusFor` (r:1023 w:1023) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1023 w:1023) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1023 w:1023) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:0 w:1023) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 1024]`. + fn ensure_updated(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0 + n * (227 ±0)` + // Estimated: `990 + n * (2603 ±0)` + // Minimum execution time: 60_700_000 picoseconds. + Weight::from_parts(61_580_000, 0) + .saturating_add(Weight::from_parts(0, 990)) + // Standard Error: 73_173 + .saturating_add(Weight::from_parts(60_030_952, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes((4_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2603).saturating_mul(n.into())) + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_proxy.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_proxy.rs new file mode 100644 index 0000000000000..3cce74d2d8737 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_proxy.rs @@ -0,0 +1,230 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. 
+ +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `pallet_proxy` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/westend-runtime/westend_runtime.wasm +// --pallet=pallet_proxy +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_proxy`. 
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> pallet_proxy::WeightInfo for WeightInfo<T> {
+	fn poke_deposit() -> Weight {
+		Default::default()
+	}
+	/// Storage: `Proxy::Proxies` (r:1 w:0)
+	/// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`)
+	/// The range of component `p` is `[1, 31]`.
+	fn proxy(p: u32, ) -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `89 + p * (37 ±0)`
+		// Estimated: `4706`
+		// Minimum execution time: 17_743_000 picoseconds.
+		Weight::from_parts(18_436_629, 0)
+			.saturating_add(Weight::from_parts(0, 4706))
+			// Standard Error: 2_056
+			.saturating_add(Weight::from_parts(43_916, 0).saturating_mul(p.into()))
+			.saturating_add(T::DbWeight::get().reads(1))
+	}
+	/// Storage: `Proxy::Proxies` (r:1 w:0)
+	/// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`)
+	/// Storage: `Proxy::Announcements` (r:1 w:1)
+	/// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`)
+	/// Storage: `System::Account` (r:1 w:1)
+	/// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	/// The range of component `a` is `[0, 31]`.
+	/// The range of component `p` is `[1, 31]`.
+	fn proxy_announced(a: u32, p: u32, ) -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `416 + a * (68 ±0) + p * (37 ±0)`
+		// Estimated: `5698`
+		// Minimum execution time: 41_728_000 picoseconds.
+ Weight::from_parts(42_605_142, 0) + .saturating_add(Weight::from_parts(0, 5698)) + // Standard Error: 2_675 + .saturating_add(Weight::from_parts(173_815, 0).saturating_mul(a.into())) + // Standard Error: 2_764 + .saturating_add(Weight::from_parts(29_849, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. + fn remove_announcement(a: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `331 + a * (68 ±0)` + // Estimated: `5698` + // Minimum execution time: 29_396_000 picoseconds. + Weight::from_parts(31_069_465, 0) + .saturating_add(Weight::from_parts(0, 5698)) + // Standard Error: 2_248 + .saturating_add(Weight::from_parts(134_192, 0).saturating_mul(a.into())) + // Standard Error: 2_322 + .saturating_add(Weight::from_parts(7_479, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. + fn reject_announcement(a: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `331 + a * (68 ±0)` + // Estimated: `5698` + // Minimum execution time: 28_816_000 picoseconds. 
+ Weight::from_parts(30_383_460, 0) + .saturating_add(Weight::from_parts(0, 5698)) + // Standard Error: 2_128 + .saturating_add(Weight::from_parts(157_895, 0).saturating_mul(a.into())) + // Standard Error: 2_198 + .saturating_add(Weight::from_parts(10_169, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. + fn announce(a: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `348 + a * (68 ±0) + p * (37 ±0)` + // Estimated: `5698` + // Minimum execution time: 37_628_000 picoseconds. + Weight::from_parts(39_513_043, 0) + .saturating_add(Weight::from_parts(0, 5698)) + // Standard Error: 2_451 + .saturating_add(Weight::from_parts(149_654, 0).saturating_mul(a.into())) + // Standard Error: 2_533 + .saturating_add(Weight::from_parts(17_215, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn add_proxy(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `89 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 28_284_000 picoseconds. 
+ Weight::from_parts(29_549_215, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 4_083 + .saturating_add(Weight::from_parts(61_848, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn remove_proxy(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `89 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 28_231_000 picoseconds. + Weight::from_parts(29_589_594, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 1_950 + .saturating_add(Weight::from_parts(54_339, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn remove_proxies(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `89 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 25_116_000 picoseconds. + Weight::from_parts(26_314_944, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 1_968 + .saturating_add(Weight::from_parts(39_294, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn create_pure(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `101` + // Estimated: `4706` + // Minimum execution time: 29_742_000 picoseconds. 
+ Weight::from_parts(31_063_206, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 2_575 + .saturating_add(Weight::from_parts(22_471, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[0, 30]`. + fn kill_pure(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `126 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 26_141_000 picoseconds. + Weight::from_parts(27_309_074, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 1_808 + .saturating_add(Weight::from_parts(37_564, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_recovery.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_recovery.rs new file mode 100644 index 0000000000000..5a08499a9a7a0 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_recovery.rs @@ -0,0 +1,187 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! 
Autogenerated weights for `pallet_recovery`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2025-02-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024
+
+// Executed Command:
+// frame-omni-bencher
+// v1
+// benchmark
+// pallet
+// --extrinsic=*
+// --runtime=target/production/wbuild/westend-runtime/westend_runtime.wasm
+// --pallet=pallet_recovery
+// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt
+// --output=./polkadot/runtime/westend/src/weights
+// --wasm-execution=compiled
+// --steps=50
+// --repeat=20
+// --heap-pages=4096
+// --no-storage-info
+// --no-min-squares
+// --no-median-slopes
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `pallet_recovery`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> pallet_recovery::WeightInfo for WeightInfo<T> {
+	/// Storage: `Recovery::Proxy` (r:1 w:0)
+	/// Proof: `Recovery::Proxy` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`)
+	fn as_recovered() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `182`
+		// Estimated: `3545`
+		// Minimum execution time: 13_838_000 picoseconds.
+		Weight::from_parts(14_446_000, 0)
+			.saturating_add(Weight::from_parts(0, 3545))
+			.saturating_add(T::DbWeight::get().reads(1))
+	}
+	/// Storage: `Recovery::Proxy` (r:0 w:1)
+	/// Proof: `Recovery::Proxy` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`)
+	fn set_recovered() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `0`
+		// Estimated: `0`
+		// Minimum execution time: 7_795_000 picoseconds.
+ Weight::from_parts(8_019_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Recovery::Recoverable` (r:1 w:1) + /// Proof: `Recovery::Recoverable` (`max_values`: None, `max_size`: Some(351), added: 2826, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 9]`. + fn create_recovery(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `76` + // Estimated: `3816` + // Minimum execution time: 28_263_000 picoseconds. + Weight::from_parts(29_693_842, 0) + .saturating_add(Weight::from_parts(0, 3816)) + // Standard Error: 6_528 + .saturating_add(Weight::from_parts(122_020, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Recovery::Recoverable` (r:1 w:0) + /// Proof: `Recovery::Recoverable` (`max_values`: None, `max_size`: Some(351), added: 2826, mode: `MaxEncodedLen`) + /// Storage: `Recovery::ActiveRecoveries` (r:1 w:1) + /// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`) + fn initiate_recovery() -> Weight { + // Proof Size summary in bytes: + // Measured: `173` + // Estimated: `3854` + // Minimum execution time: 33_442_000 picoseconds. + Weight::from_parts(35_142_000, 0) + .saturating_add(Weight::from_parts(0, 3854)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Recovery::Recoverable` (r:1 w:0) + /// Proof: `Recovery::Recoverable` (`max_values`: None, `max_size`: Some(351), added: 2826, mode: `MaxEncodedLen`) + /// Storage: `Recovery::ActiveRecoveries` (r:1 w:1) + /// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 9]`. 
+ fn vouch_recovery(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `261 + n * (64 ±0)` + // Estimated: `3854` + // Minimum execution time: 22_866_000 picoseconds. + Weight::from_parts(23_906_291, 0) + .saturating_add(Weight::from_parts(0, 3854)) + // Standard Error: 4_647 + .saturating_add(Weight::from_parts(182_215, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Recovery::Recoverable` (r:1 w:0) + /// Proof: `Recovery::Recoverable` (`max_values`: None, `max_size`: Some(351), added: 2826, mode: `MaxEncodedLen`) + /// Storage: `Recovery::ActiveRecoveries` (r:1 w:0) + /// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`) + /// Storage: `Recovery::Proxy` (r:1 w:1) + /// Proof: `Recovery::Proxy` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 9]`. + fn claim_recovery(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `293 + n * (64 ±0)` + // Estimated: `3854` + // Minimum execution time: 27_549_000 picoseconds. + Weight::from_parts(28_695_066, 0) + .saturating_add(Weight::from_parts(0, 3854)) + // Standard Error: 6_024 + .saturating_add(Weight::from_parts(122_957, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Recovery::ActiveRecoveries` (r:1 w:1) + /// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 9]`. 
+ fn close_recovery(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `414 + n * (32 ±0)` + // Estimated: `3854` + // Minimum execution time: 38_880_000 picoseconds. + Weight::from_parts(40_901_189, 0) + .saturating_add(Weight::from_parts(0, 3854)) + // Standard Error: 8_408 + .saturating_add(Weight::from_parts(130_066, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Recovery::ActiveRecoveries` (r:1 w:0) + /// Proof: `Recovery::ActiveRecoveries` (`max_values`: None, `max_size`: Some(389), added: 2864, mode: `MaxEncodedLen`) + /// Storage: `Recovery::Recoverable` (r:1 w:1) + /// Proof: `Recovery::Recoverable` (`max_values`: None, `max_size`: Some(351), added: 2826, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 9]`. + fn remove_recovery(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `170 + n * (32 ±0)` + // Estimated: `3854` + // Minimum execution time: 32_722_000 picoseconds. + Weight::from_parts(34_426_673, 0) + .saturating_add(Weight::from_parts(0, 3854)) + // Standard Error: 7_747 + .saturating_add(Weight::from_parts(63_348, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Recovery::Proxy` (r:1 w:1) + /// Proof: `Recovery::Proxy` (`max_values`: None, `max_size`: Some(80), added: 2555, mode: `MaxEncodedLen`) + fn cancel_recovered() -> Weight { + // Proof Size summary in bytes: + // Measured: `182` + // Estimated: `3545` + // Minimum execution time: 15_521_000 picoseconds. 
+ Weight::from_parts(15_984_000, 0) + .saturating_add(Weight::from_parts(0, 3545)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_referenda.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_referenda.rs new file mode 100644 index 0000000000000..04b881c18d69e --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_referenda.rs @@ -0,0 +1,518 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `pallet_referenda` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024
+
+// Executed Command:
+// frame-omni-bencher
+// v1
+// benchmark
+// pallet
+// --extrinsic=*
+// --runtime=target/production/wbuild/westend-runtime/westend_runtime.wasm
+// --pallet=pallet_referenda
+// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt
+// --output=./polkadot/runtime/westend/src/weights
+// --wasm-execution=compiled
+// --steps=50
+// --repeat=20
+// --heap-pages=4096
+// --no-storage-info
+// --no-min-squares
+// --no-median-slopes
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `pallet_referenda`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> pallet_referenda::WeightInfo for WeightInfo<T> {
+	/// Storage: `Referenda::ReferendumCount` (r:1 w:1)
+	/// Proof: `Referenda::ReferendumCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `Scheduler::Agenda` (r:1 w:1)
+	/// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`)
+	/// Storage: `Referenda::ReferendumInfoFor` (r:0 w:1)
+	/// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`)
+	fn submit() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `185`
+		// Estimated: `42428`
+		// Minimum execution time: 40_477_000 picoseconds.
+ Weight::from_parts(41_732_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + fn place_decision_deposit_preparing() -> Weight { + // Proof Size summary in bytes: + // Measured: `438` + // Estimated: `83866` + // Minimum execution time: 54_199_000 picoseconds. + Weight::from_parts(56_479_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:0) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + fn place_decision_deposit_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `3225` + // 
Estimated: `42428` + // Minimum execution time: 68_778_000 picoseconds. + Weight::from_parts(71_611_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:0) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + fn place_decision_deposit_not_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `3245` + // Estimated: `42428` + // Minimum execution time: 67_009_000 picoseconds. 
+ Weight::from_parts(69_038_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:1) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + fn place_decision_deposit_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `438` + // Estimated: `83866` + // Minimum execution time: 64_226_000 picoseconds. 
+ Weight::from_parts(66_127_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:1) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + fn place_decision_deposit_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `438` + // Estimated: `83866` + // Minimum execution time: 62_319_000 picoseconds. + Weight::from_parts(63_894_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + fn refund_decision_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `279` + // Estimated: `4401` + // Minimum execution time: 32_005_000 picoseconds. 
+ Weight::from_parts(32_773_000, 0) + .saturating_add(Weight::from_parts(0, 4401)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + fn refund_submission_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `269` + // Estimated: `4401` + // Minimum execution time: 32_438_000 picoseconds. + Weight::from_parts(33_359_000, 0) + .saturating_add(Weight::from_parts(0, 4401)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + fn cancel() -> Weight { + // Proof Size summary in bytes: + // Measured: `346` + // Estimated: `83866` + // Minimum execution time: 38_216_000 picoseconds. 
+ Weight::from_parts(39_246_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Referenda::MetadataOf` (r:1 w:0) + /// Proof: `Referenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + fn kill() -> Weight { + // Proof Size summary in bytes: + // Measured: `690` + // Estimated: `83866` + // Minimum execution time: 99_147_000 picoseconds. + Weight::from_parts(101_951_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `Referenda::TrackQueue` (r:1 w:0) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:1) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + fn one_fewer_deciding_queue_empty() -> Weight { + // Proof Size summary in bytes: + // Measured: `102` + // Estimated: `5477` + // Minimum execution time: 10_746_000 picoseconds. 
+ Weight::from_parts(11_142_000, 0) + .saturating_add(Weight::from_parts(0, 5477)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn one_fewer_deciding_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `3115` + // Estimated: `42428` + // Minimum execution time: 47_103_000 picoseconds. + Weight::from_parts(47_665_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn one_fewer_deciding_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `3115` + // Estimated: `42428` + // Minimum execution time: 48_466_000 picoseconds. 
+ Weight::from_parts(50_411_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) + fn nudge_referendum_requeued_insertion() -> Weight { + // Proof Size summary in bytes: + // Measured: `2939` + // Estimated: `5477` + // Minimum execution time: 22_115_000 picoseconds. + Weight::from_parts(22_942_000, 0) + .saturating_add(Weight::from_parts(0, 5477)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) + fn nudge_referendum_requeued_slide() -> Weight { + // Proof Size summary in bytes: + // Measured: `2939` + // Estimated: `5477` + // Minimum execution time: 21_919_000 picoseconds. 
+ Weight::from_parts(23_171_000, 0) + .saturating_add(Weight::from_parts(0, 5477)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:0) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) + fn nudge_referendum_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `2943` + // Estimated: `5477` + // Minimum execution time: 28_236_000 picoseconds. + Weight::from_parts(29_173_000, 0) + .saturating_add(Weight::from_parts(0, 5477)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:0) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Referenda::TrackQueue` (r:1 w:1) + /// Proof: `Referenda::TrackQueue` (`max_values`: None, `max_size`: Some(2012), added: 4487, mode: `MaxEncodedLen`) + fn nudge_referendum_not_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `2963` + // Estimated: `5477` + // Minimum execution time: 27_496_000 picoseconds. 
+ Weight::from_parts(28_821_000, 0) + .saturating_add(Weight::from_parts(0, 5477)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn nudge_referendum_no_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `298` + // Estimated: `42428` + // Minimum execution time: 27_704_000 picoseconds. + Weight::from_parts(28_725_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn nudge_referendum_preparing() -> Weight { + // Proof Size summary in bytes: + // Measured: `346` + // Estimated: `42428` + // Minimum execution time: 27_023_000 picoseconds. + Weight::from_parts(27_790_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + fn nudge_referendum_timed_out() -> Weight { + // Proof Size summary in bytes: + // Measured: `206` + // Estimated: `4401` + // Minimum execution time: 17_950_000 picoseconds. 
+ Weight::from_parts(18_748_000, 0) + .saturating_add(Weight::from_parts(0, 4401)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:1) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn nudge_referendum_begin_deciding_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `346` + // Estimated: `42428` + // Minimum execution time: 34_445_000 picoseconds. + Weight::from_parts(35_513_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Referenda::DecidingCount` (r:1 w:1) + /// Proof: `Referenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn nudge_referendum_begin_deciding_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `346` + // Estimated: `42428` + // Minimum execution time: 36_060_000 picoseconds. 
+ Weight::from_parts(37_467_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn nudge_referendum_begin_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `399` + // Estimated: `42428` + // Minimum execution time: 28_267_000 picoseconds. + Weight::from_parts(29_089_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn nudge_referendum_end_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `382` + // Estimated: `42428` + // Minimum execution time: 28_121_000 picoseconds. 
+ Weight::from_parts(28_786_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn nudge_referendum_continue_not_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `399` + // Estimated: `42428` + // Minimum execution time: 26_833_000 picoseconds. + Weight::from_parts(27_752_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn nudge_referendum_continue_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `403` + // Estimated: `42428` + // Minimum execution time: 25_584_000 picoseconds. 
+ Weight::from_parts(26_368_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Lookup` (r:1 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + fn nudge_referendum_approved() -> Weight { + // Proof Size summary in bytes: + // Measured: `403` + // Estimated: `83866` + // Minimum execution time: 39_108_000 picoseconds. + Weight::from_parts(40_227_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + fn nudge_referendum_rejected() -> Weight { + // Proof Size summary in bytes: + // Measured: `399` + // Estimated: `42428` + // Minimum execution time: 28_038_000 picoseconds. 
+ Weight::from_parts(28_536_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:0) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Referenda::MetadataOf` (r:0 w:1) + /// Proof: `Referenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn set_some_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `350` + // Estimated: `4401` + // Minimum execution time: 25_211_000 picoseconds. + Weight::from_parts(26_519_000, 0) + .saturating_add(Weight::from_parts(0, 4401)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Referenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `Referenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(936), added: 3411, mode: `MaxEncodedLen`) + /// Storage: `Referenda::MetadataOf` (r:1 w:1) + /// Proof: `Referenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn clear_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `283` + // Estimated: `4401` + // Minimum execution time: 20_464_000 picoseconds. 
+ Weight::from_parts(21_232_000, 0) + .saturating_add(Weight::from_parts(0, 4401)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_referenda_referenda.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_referenda_referenda.rs new file mode 100644 index 0000000000000..dec3cb021af93 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_referenda_referenda.rs @@ -0,0 +1,523 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `pallet_referenda` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-06-19, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-e8ezs4ez-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("kusama-dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot +// benchmark +// pallet +// --chain=kusama-dev +// --steps=50 +// --repeat=20 +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --pallet=pallet_referenda +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --header=./file_header.txt +// --output=./runtime/kusama/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_referenda`. +pub struct WeightInfo(PhantomData); +impl pallet_referenda::WeightInfo for WeightInfo { + /// Storage: Referenda ReferendumCount (r:1 w:1) + /// Proof: Referenda ReferendumCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + /// Storage: Referenda ReferendumInfoFor (r:0 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + fn submit() -> Weight { + // Proof Size summary in bytes: + // Measured: `186` + // Estimated: `42428` + // Minimum execution time: 39_146_000 picoseconds. 
+ Weight::from_parts(40_383_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:2 w:2) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn place_decision_deposit_preparing() -> Weight { + // Proof Size summary in bytes: + // Measured: `439` + // Estimated: `83866` + // Minimum execution time: 51_385_000 picoseconds. + Weight::from_parts(52_701_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Referenda DecidingCount (r:1 w:0) + /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Referenda TrackQueue (r:1 w:1) + /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn place_decision_deposit_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `3196` + // Estimated: `42428` + // Minimum execution time: 70_018_000 picoseconds. 
+ Weight::from_parts(75_868_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Referenda DecidingCount (r:1 w:0) + /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Referenda TrackQueue (r:1 w:1) + /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn place_decision_deposit_not_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `3216` + // Estimated: `42428` + // Minimum execution time: 69_311_000 picoseconds. + Weight::from_parts(72_425_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Referenda DecidingCount (r:1 w:1) + /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:2 w:2) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn place_decision_deposit_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `439` + // Estimated: `83866` + // Minimum execution time: 64_385_000 
picoseconds. + Weight::from_parts(66_178_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Referenda DecidingCount (r:1 w:1) + /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:2 w:2) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn place_decision_deposit_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `439` + // Estimated: `83866` + // Minimum execution time: 62_200_000 picoseconds. + Weight::from_parts(63_782_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + fn refund_decision_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `279` + // Estimated: `4401` + // Minimum execution time: 29_677_000 picoseconds. 
+ Weight::from_parts(30_603_000, 0) + .saturating_add(Weight::from_parts(0, 4401)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + fn refund_submission_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `269` + // Estimated: `4401` + // Minimum execution time: 29_897_000 picoseconds. + Weight::from_parts(30_618_000, 0) + .saturating_add(Weight::from_parts(0, 4401)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:2 w:2) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn cancel() -> Weight { + // Proof Size summary in bytes: + // Measured: `347` + // Estimated: `83866` + // Minimum execution time: 37_697_000 picoseconds. + Weight::from_parts(38_953_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:2 w:2) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + /// Storage: Referenda MetadataOf (r:1 w:0) + /// Proof: Referenda MetadataOf (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn kill() -> Weight { + // Proof Size summary in bytes: + // Measured: `588` + // Estimated: `83866` + // Minimum execution time: 106_001_000 picoseconds. 
+ Weight::from_parts(107_102_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: Referenda TrackQueue (r:1 w:0) + /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + /// Storage: Referenda DecidingCount (r:1 w:1) + /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + fn one_fewer_deciding_queue_empty() -> Weight { + // Proof Size summary in bytes: + // Measured: `102` + // Estimated: `5477` + // Minimum execution time: 8_987_000 picoseconds. + Weight::from_parts(9_431_000, 0) + .saturating_add(Weight::from_parts(0, 5477)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: Referenda TrackQueue (r:1 w:1) + /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn one_fewer_deciding_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `3116` + // Estimated: `42428` + // Minimum execution time: 55_344_000 picoseconds. 
+ Weight::from_parts(58_026_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: Referenda TrackQueue (r:1 w:1) + /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn one_fewer_deciding_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `3116` + // Estimated: `42428` + // Minimum execution time: 57_003_000 picoseconds. + Weight::from_parts(60_347_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:0) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Referenda TrackQueue (r:1 w:1) + /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + fn nudge_referendum_requeued_insertion() -> Weight { + // Proof Size summary in bytes: + // Measured: `2939` + // Estimated: `5477` + // Minimum execution time: 23_001_000 picoseconds. 
+ Weight::from_parts(24_812_000, 0) + .saturating_add(Weight::from_parts(0, 5477)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:0) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Referenda TrackQueue (r:1 w:1) + /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + fn nudge_referendum_requeued_slide() -> Weight { + // Proof Size summary in bytes: + // Measured: `2939` + // Estimated: `5477` + // Minimum execution time: 23_299_000 picoseconds. + Weight::from_parts(24_465_000, 0) + .saturating_add(Weight::from_parts(0, 5477)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Referenda DecidingCount (r:1 w:0) + /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Referenda TrackQueue (r:1 w:1) + /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + fn nudge_referendum_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `2943` + // Estimated: `5477` + // Minimum execution time: 28_223_000 picoseconds. 
+ Weight::from_parts(29_664_000, 0) + .saturating_add(Weight::from_parts(0, 5477)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Referenda DecidingCount (r:1 w:0) + /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Referenda TrackQueue (r:1 w:1) + /// Proof: Referenda TrackQueue (max_values: None, max_size: Some(2012), added: 4487, mode: MaxEncodedLen) + fn nudge_referendum_not_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `2963` + // Estimated: `5477` + // Minimum execution time: 27_474_000 picoseconds. + Weight::from_parts(29_072_000, 0) + .saturating_add(Weight::from_parts(0, 5477)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_no_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `299` + // Estimated: `42428` + // Minimum execution time: 24_405_000 picoseconds. 
+ Weight::from_parts(25_184_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_preparing() -> Weight { + // Proof Size summary in bytes: + // Measured: `347` + // Estimated: `42428` + // Minimum execution time: 24_572_000 picoseconds. + Weight::from_parts(25_287_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + fn nudge_referendum_timed_out() -> Weight { + // Proof Size summary in bytes: + // Measured: `206` + // Estimated: `4401` + // Minimum execution time: 16_042_000 picoseconds. 
+ Weight::from_parts(16_610_000, 0) + .saturating_add(Weight::from_parts(0, 4401)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Referenda DecidingCount (r:1 w:1) + /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_begin_deciding_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `347` + // Estimated: `42428` + // Minimum execution time: 33_639_000 picoseconds. 
+ Weight::from_parts(34_749_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Referenda DecidingCount (r:1 w:1) + /// Proof: Referenda DecidingCount (max_values: None, max_size: Some(14), added: 2489, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_begin_deciding_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `347` + // Estimated: `42428` + // Minimum execution time: 36_467_000 picoseconds. + Weight::from_parts(37_693_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_begin_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `400` + // Estimated: `42428` + // Minimum execution time: 29_857_000 picoseconds. 
+ Weight::from_parts(30_840_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_end_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `383` + // Estimated: `42428` + // Minimum execution time: 31_028_000 picoseconds. + Weight::from_parts(32_154_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_continue_not_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `400` + // Estimated: `42428` + // Minimum execution time: 28_594_000 picoseconds. 
+ Weight::from_parts(29_092_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_continue_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `404` + // Estimated: `42428` + // Minimum execution time: 27_246_000 picoseconds. + Weight::from_parts(28_003_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:2 w:2) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + /// Storage: Scheduler Lookup (r:1 w:1) + /// Proof: Scheduler Lookup (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) + fn nudge_referendum_approved() -> Weight { + // Proof Size summary in bytes: + // Measured: `404` + // Estimated: `83866` + // Minimum execution time: 43_426_000 picoseconds. 
+ Weight::from_parts(44_917_000, 0) + .saturating_add(Weight::from_parts(0, 83866)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:1) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Balances InactiveIssuance (r:1 w:0) + /// Proof: Balances InactiveIssuance (max_values: Some(1), max_size: Some(16), added: 511, mode: MaxEncodedLen) + /// Storage: Scheduler Agenda (r:1 w:1) + /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + fn nudge_referendum_rejected() -> Weight { + // Proof Size summary in bytes: + // Measured: `400` + // Estimated: `42428` + // Minimum execution time: 30_285_000 picoseconds. + Weight::from_parts(31_575_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:0) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Preimage StatusFor (r:1 w:0) + /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: Referenda MetadataOf (r:0 w:1) + /// Proof: Referenda MetadataOf (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn set_some_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `350` + // Estimated: `4401` + // Minimum execution time: 19_254_000 picoseconds. 
+ Weight::from_parts(19_855_000, 0) + .saturating_add(Weight::from_parts(0, 4401)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: Referenda ReferendumInfoFor (r:1 w:0) + /// Proof: Referenda ReferendumInfoFor (max_values: None, max_size: Some(936), added: 3411, mode: MaxEncodedLen) + /// Storage: Referenda MetadataOf (r:1 w:1) + /// Proof: Referenda MetadataOf (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn clear_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `283` + // Estimated: `4401` + // Minimum execution time: 16_957_000 picoseconds. + Weight::from_parts(17_556_000, 0) + .saturating_add(Weight::from_parts(0, 4401)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_scheduler.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_scheduler.rs new file mode 100644 index 0000000000000..4957a1c4504d8 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_scheduler.rs @@ -0,0 +1,291 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `pallet_scheduler` +//! +//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/westend-runtime/westend_runtime.wasm +// --pallet=pallet_scheduler +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_scheduler`. +pub struct WeightInfo(PhantomData); +impl pallet_scheduler::WeightInfo for WeightInfo { + /// Storage: `Scheduler::IncompleteSince` (r:1 w:1) + /// Proof: `Scheduler::IncompleteSince` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn service_agendas_base() -> Weight { + // Proof Size summary in bytes: + // Measured: `68` + // Estimated: `1489` + // Minimum execution time: 4_441_000 picoseconds. + Weight::from_parts(4_623_000, 0) + .saturating_add(Weight::from_parts(0, 1489)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// The range of component `s` is `[0, 50]`. 
+ fn service_agenda_base(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `115 + s * (177 ±0)` + // Estimated: `42428` + // Minimum execution time: 4_664_000 picoseconds. + Weight::from_parts(11_471_926, 0) + .saturating_add(Weight::from_parts(0, 42428)) + // Standard Error: 3_126 + .saturating_add(Weight::from_parts(360_894, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn service_task_base() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_795_000 picoseconds. + Weight::from_parts(4_029_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `Preimage::PreimageFor` (r:1 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `Measured`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// The range of component `s` is `[128, 4194304]`. + fn service_task_fetched(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `179 + s * (1 ±0)` + // Estimated: `3644 + s * (1 ±0)` + // Minimum execution time: 24_224_000 picoseconds. 
+ Weight::from_parts(24_784_000, 0) + .saturating_add(Weight::from_parts(0, 3644)) + // Standard Error: 343 + .saturating_add(Weight::from_parts(30_390, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(s.into())) + } + /// Storage: `Scheduler::Lookup` (r:0 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + fn service_task_named() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 5_459_000 picoseconds. + Weight::from_parts(5_847_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn service_task_periodic() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_816_000 picoseconds. + Weight::from_parts(3_937_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn execute_dispatch_signed() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_306_000 picoseconds. + Weight::from_parts(2_511_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn execute_dispatch_unsigned() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_296_000 picoseconds. + Weight::from_parts(2_431_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// The range of component `s` is `[0, 49]`. + fn schedule(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `115 + s * (177 ±0)` + // Estimated: `42428` + // Minimum execution time: 12_204_000 picoseconds. 
+ Weight::from_parts(18_541_327, 0) + .saturating_add(Weight::from_parts(0, 42428)) + // Standard Error: 3_093 + .saturating_add(Weight::from_parts(407_367, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Lookup` (r:0 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// The range of component `s` is `[1, 50]`. + fn cancel(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `115 + s * (177 ±0)` + // Estimated: `42428` + // Minimum execution time: 20_962_000 picoseconds. + Weight::from_parts(21_273_113, 0) + .saturating_add(Weight::from_parts(0, 42428)) + // Standard Error: 1_869 + .saturating_add(Weight::from_parts(593_800, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Scheduler::Lookup` (r:1 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// The range of component `s` is `[0, 49]`. + fn schedule_named(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `292 + s * (185 ±0)` + // Estimated: `42428` + // Minimum execution time: 15_672_000 picoseconds. 
+ Weight::from_parts(23_639_859, 0) + .saturating_add(Weight::from_parts(0, 42428)) + // Standard Error: 4_219 + .saturating_add(Weight::from_parts(467_925, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Scheduler::Lookup` (r:1 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + /// The range of component `s` is `[1, 50]`. + fn cancel_named(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `318 + s * (185 ±0)` + // Estimated: `42428` + // Minimum execution time: 23_671_000 picoseconds. + Weight::from_parts(24_871_802, 0) + .saturating_add(Weight::from_parts(0, 42428)) + // Standard Error: 1_859 + .saturating_add(Weight::from_parts(629_450, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + /// The range of component `s` is `[1, 50]`. + fn schedule_retry(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `155` + // Estimated: `42428` + // Minimum execution time: 14_919_000 picoseconds. 
+ Weight::from_parts(15_402_950, 0) + .saturating_add(Weight::from_parts(0, 42428)) + // Standard Error: 751 + .saturating_add(Weight::from_parts(30_479, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Scheduler::Agenda` (r:1 w:0) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + fn set_retry() -> Weight { + // Proof Size summary in bytes: + // Measured: `8965` + // Estimated: `42428` + // Minimum execution time: 30_185_000 picoseconds. + Weight::from_parts(31_937_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Scheduler::Lookup` (r:1 w:0) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:0) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + fn set_retry_named() -> Weight { + // Proof Size summary in bytes: + // Measured: `9643` + // Estimated: `42428` + // Minimum execution time: 38_375_000 picoseconds. 
+ Weight::from_parts(39_701_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Scheduler::Agenda` (r:1 w:0) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + fn cancel_retry() -> Weight { + // Proof Size summary in bytes: + // Measured: `8977` + // Estimated: `42428` + // Minimum execution time: 29_797_000 picoseconds. + Weight::from_parts(30_298_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Scheduler::Lookup` (r:1 w:0) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:0) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + fn cancel_retry_named() -> Weight { + // Proof Size summary in bytes: + // Measured: `9655` + // Estimated: `42428` + // Minimum execution time: 37_375_000 picoseconds. 
+ Weight::from_parts(38_679_000, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_session.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_session.rs new file mode 100644 index 0000000000000..1f960f20c8a24 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_session.rs @@ -0,0 +1,86 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `pallet_session` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/westend-runtime/westend_runtime.wasm +// --pallet=pallet_session +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_session`. +pub struct WeightInfo(PhantomData); +impl pallet_session::WeightInfo for WeightInfo { + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Session::NextKeys` (r:1 w:1) + /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Session::KeyOwner` (r:6 w:6) + /// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn set_keys() -> Weight { + // Proof Size summary in bytes: + // Measured: `1899` + // Estimated: `17739` + // Minimum execution time: 71_274_000 picoseconds. 
+ Weight::from_parts(73_693_000, 0) + .saturating_add(Weight::from_parts(0, 17739)) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(7)) + } + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Session::NextKeys` (r:1 w:1) + /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Session::KeyOwner` (r:0 w:6) + /// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn purge_keys() -> Weight { + // Proof Size summary in bytes: + // Measured: `1814` + // Estimated: `5279` + // Minimum execution time: 52_441_000 picoseconds. + Weight::from_parts(55_437_000, 0) + .saturating_add(Weight::from_parts(0, 5279)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(7)) + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_sudo.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_sudo.rs new file mode 100644 index 0000000000000..7a607866902aa --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_sudo.rs @@ -0,0 +1,111 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! 
Autogenerated weights for `pallet_sudo` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/westend-runtime/westend_runtime.wasm +// --pallet=pallet_sudo +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_sudo`. +pub struct WeightInfo(PhantomData); +impl pallet_sudo::WeightInfo for WeightInfo { + /// Storage: `Sudo::Key` (r:1 w:1) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + fn set_key() -> Weight { + // Proof Size summary in bytes: + // Measured: `165` + // Estimated: `1517` + // Minimum execution time: 14_510_000 picoseconds. + Weight::from_parts(15_008_000, 0) + .saturating_add(Weight::from_parts(0, 1517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Sudo::Key` (r:1 w:0) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + fn sudo() -> Weight { + // Proof Size summary in bytes: + // Measured: `165` + // Estimated: `1517` + // Minimum execution time: 15_250_000 picoseconds. 
+ Weight::from_parts(15_782_000, 0) + .saturating_add(Weight::from_parts(0, 1517)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `Sudo::Key` (r:1 w:0) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + fn sudo_as() -> Weight { + // Proof Size summary in bytes: + // Measured: `165` + // Estimated: `1517` + // Minimum execution time: 15_455_000 picoseconds. + Weight::from_parts(16_025_000, 0) + .saturating_add(Weight::from_parts(0, 1517)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `Sudo::Key` (r:1 w:1) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + fn remove_key() -> Weight { + // Proof Size summary in bytes: + // Measured: `165` + // Estimated: `1517` + // Minimum execution time: 13_514_000 picoseconds. + Weight::from_parts(14_155_000, 0) + .saturating_add(Weight::from_parts(0, 1517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Sudo::Key` (r:1 w:0) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + fn check_only_sudo_account() -> Weight { + // Proof Size summary in bytes: + // Measured: `165` + // Estimated: `1517` + // Minimum execution time: 7_772_000 picoseconds. + Weight::from_parts(8_189_000, 0) + .saturating_add(Weight::from_parts(0, 1517)) + .saturating_add(T::DbWeight::get().reads(1)) + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_timestamp.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_timestamp.rs new file mode 100644 index 0000000000000..eaf98d0fc315e --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_timestamp.rs @@ -0,0 +1,76 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. 
+ +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `pallet_timestamp` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/westend-runtime/westend_runtime.wasm +// --pallet=pallet_timestamp +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_timestamp`. 
+pub struct WeightInfo(PhantomData); +impl pallet_timestamp::WeightInfo for WeightInfo { + /// Storage: `Timestamp::Now` (r:1 w:1) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Babe::CurrentSlot` (r:1 w:0) + /// Proof: `Babe::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + fn set() -> Weight { + // Proof Size summary in bytes: + // Measured: `173` + // Estimated: `1493` + // Minimum execution time: 12_078_000 picoseconds. + Weight::from_parts(12_823_000, 0) + .saturating_add(Weight::from_parts(0, 1493)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn on_finalize() -> Weight { + // Proof Size summary in bytes: + // Measured: `94` + // Estimated: `0` + // Minimum execution time: 4_639_000 picoseconds. + Weight::from_parts(4_786_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_transaction_payment.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_transaction_payment.rs new file mode 100644 index 0000000000000..8843834ff0b42 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_transaction_payment.rs @@ -0,0 +1,66 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `pallet_transaction_payment` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/westend-runtime/westend_runtime.wasm +// --pallet=pallet_transaction_payment +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_transaction_payment`. +pub struct WeightInfo(PhantomData); +impl pallet_transaction_payment::WeightInfo for WeightInfo { + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn charge_transaction_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `101` + // Estimated: `3593` + // Minimum execution time: 42_713_000 picoseconds. 
+ Weight::from_parts(43_379_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_treasury.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_treasury.rs new file mode 100644 index 0000000000000..2ff1b7ec304f9 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_treasury.rs @@ -0,0 +1,169 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `pallet_treasury` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/westend-runtime/westend_runtime.wasm +// --pallet=pallet_treasury +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_treasury`. +pub struct WeightInfo(PhantomData); +impl pallet_treasury::WeightInfo for WeightInfo { + /// Storage: `Treasury::ProposalCount` (r:1 w:1) + /// Proof: `Treasury::ProposalCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Approvals` (r:1 w:1) + /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Proposals` (r:0 w:1) + /// Proof: `Treasury::Proposals` (`max_values`: None, `max_size`: Some(108), added: 2583, mode: `MaxEncodedLen`) + fn spend_local() -> Weight { + // Proof Size summary in bytes: + // Measured: `142` + // Estimated: `1887` + // Minimum execution time: 13_064_000 picoseconds. 
+ Weight::from_parts(13_610_000, 0) + .saturating_add(Weight::from_parts(0, 1887)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Treasury::Approvals` (r:1 w:1) + /// Proof: `Treasury::Approvals` (`max_values`: Some(1), `max_size`: Some(402), added: 897, mode: `MaxEncodedLen`) + fn remove_approval() -> Weight { + // Proof Size summary in bytes: + // Measured: `227` + // Estimated: `1887` + // Minimum execution time: 7_097_000 picoseconds. + Weight::from_parts(7_538_000, 0) + .saturating_add(Weight::from_parts(0, 1887)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Deactivated` (r:1 w:1) + /// Proof: `Treasury::Deactivated` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Treasury::LastSpendPeriod` (r:1 w:1) + /// Proof: `Treasury::LastSpendPeriod` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// The range of component `p` is `[0, 99]`. + fn on_initialize_proposals(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `350 + p * (1 ±0)` + // Estimated: `3593` + // Minimum execution time: 17_293_000 picoseconds. 
+ Weight::from_parts(20_649_783, 0) + .saturating_add(Weight::from_parts(0, 3593)) + // Standard Error: 1_076 + .saturating_add(Weight::from_parts(61_157, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AssetRate::ConversionRateToNative` (r:1 w:0) + /// Proof: `AssetRate::ConversionRateToNative` (`max_values`: None, `max_size`: Some(1238), added: 3713, mode: `MaxEncodedLen`) + /// Storage: `Treasury::SpendCount` (r:1 w:1) + /// Proof: `Treasury::SpendCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Treasury::Spends` (r:0 w:1) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(1853), added: 4328, mode: `MaxEncodedLen`) + fn spend() -> Weight { + // Proof Size summary in bytes: + // Measured: `214` + // Estimated: `4703` + // Minimum execution time: 23_796_000 picoseconds. + Weight::from_parts(24_793_000, 0) + .saturating_add(Weight::from_parts(0, 4703)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Treasury::Spends` (r:1 w:1) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(1853), added: 4328, mode: `MaxEncodedLen`) + /// Storage: `XcmPallet::QueryCounter` (r:1 w:1) + /// Proof: `XcmPallet::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:1 w:0) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, 
mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::Queries` (r:0 w:1) + /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn payout() -> Weight { + // Proof Size summary in bytes: + // Measured: `489` + // Estimated: `5318` + // Minimum execution time: 60_562_000 picoseconds. + Weight::from_parts(62_867_000, 0) + .saturating_add(Weight::from_parts(0, 5318)) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `Treasury::Spends` (r:1 w:1) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(1853), added: 4328, mode: `MaxEncodedLen`) + /// Storage: `XcmPallet::Queries` (r:1 w:1) + /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn check_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `305` + // Estimated: `5318` + // Minimum execution time: 28_594_000 picoseconds. + Weight::from_parts(29_512_000, 0) + .saturating_add(Weight::from_parts(0, 5318)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Treasury::Spends` (r:1 w:1) + /// Proof: `Treasury::Spends` (`max_values`: None, `max_size`: Some(1853), added: 4328, mode: `MaxEncodedLen`) + fn void_spend() -> Weight { + // Proof Size summary in bytes: + // Measured: `277` + // Estimated: `5318` + // Minimum execution time: 18_432_000 picoseconds. 
+ Weight::from_parts(19_026_000, 0) + .saturating_add(Weight::from_parts(0, 5318)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_utility.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_utility.rs new file mode 100644 index 0000000000000..752f3155d28fe --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_utility.rs @@ -0,0 +1,119 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `pallet_utility` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/westend-runtime/westend_runtime.wasm +// --pallet=pallet_utility +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_utility`. +pub struct WeightInfo(PhantomData); +impl pallet_utility::WeightInfo for WeightInfo { + /// The range of component `c` is `[0, 1000]`. + fn batch(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 5_587_000 picoseconds. + Weight::from_parts(5_743_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 1_917 + .saturating_add(Weight::from_parts(3_621_902, 0).saturating_mul(c.into())) + } + fn as_derivative() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 4_464_000 picoseconds. + Weight::from_parts(4_707_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// The range of component `c` is `[0, 1000]`. + fn batch_all(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 5_481_000 picoseconds. 
+ Weight::from_parts(5_555_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 2_110 + .saturating_add(Weight::from_parts(3_847_534, 0).saturating_mul(c.into())) + } + fn dispatch_as() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_415_000 picoseconds. + Weight::from_parts(7_617_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// The range of component `c` is `[0, 1000]`. + fn force_batch(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 5_332_000 picoseconds. + Weight::from_parts(5_576_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 2_069 + .saturating_add(Weight::from_parts(3_625_441, 0).saturating_mul(c.into())) + } + fn dispatch_as_fallible() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_330_000 picoseconds. + Weight::from_parts(7_583_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn if_else() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 8_846_000 picoseconds. + Weight::from_parts(9_337_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_vesting.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_vesting.rs new file mode 100644 index 0000000000000..25d1c9eab7146 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_vesting.rs @@ -0,0 +1,262 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `pallet_vesting` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/westend-runtime/westend_runtime.wasm +// --pallet=pallet_vesting +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_vesting`. 
+pub struct WeightInfo(PhantomData); +impl pallet_vesting::WeightInfo for WeightInfo { + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// The range of component `l` is `[0, 49]`. + /// The range of component `s` is `[1, 28]`. + fn vest_locked(l: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `345 + l * (25 ±0) + s * (36 ±0)` + // Estimated: `4764` + // Minimum execution time: 38_225_000 picoseconds. + Weight::from_parts(37_860_470, 0) + .saturating_add(Weight::from_parts(0, 4764)) + // Standard Error: 1_479 + .saturating_add(Weight::from_parts(41_149, 0).saturating_mul(l.into())) + // Standard Error: 2_631 + .saturating_add(Weight::from_parts(76_064, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// The range of component `l` is `[0, 49]`. + /// The range of component `s` is `[1, 28]`. 
+ fn vest_unlocked(l: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `345 + l * (25 ±0) + s * (36 ±0)` + // Estimated: `4764` + // Minimum execution time: 40_682_000 picoseconds. + Weight::from_parts(40_558_815, 0) + .saturating_add(Weight::from_parts(0, 4764)) + // Standard Error: 1_473 + .saturating_add(Weight::from_parts(35_138, 0).saturating_mul(l.into())) + // Standard Error: 2_620 + .saturating_add(Weight::from_parts(72_425, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `l` is `[0, 49]`. + /// The range of component `s` is `[1, 28]`. + fn vest_other_locked(l: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `448 + l * (25 ±0) + s * (36 ±0)` + // Estimated: `4764` + // Minimum execution time: 40_813_000 picoseconds. 
+ Weight::from_parts(40_248_990, 0) + .saturating_add(Weight::from_parts(0, 4764)) + // Standard Error: 1_925 + .saturating_add(Weight::from_parts(47_778, 0).saturating_mul(l.into())) + // Standard Error: 3_425 + .saturating_add(Weight::from_parts(88_421, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `l` is `[0, 49]`. + /// The range of component `s` is `[1, 28]`. + fn vest_other_unlocked(l: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `448 + l * (25 ±0) + s * (36 ±0)` + // Estimated: `4764` + // Minimum execution time: 43_330_000 picoseconds. 
+ Weight::from_parts(43_588_745, 0) + .saturating_add(Weight::from_parts(0, 4764)) + // Standard Error: 2_075 + .saturating_add(Weight::from_parts(35_838, 0).saturating_mul(l.into())) + // Standard Error: 3_693 + .saturating_add(Weight::from_parts(73_951, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// The range of component `l` is `[0, 49]`. + /// The range of component `s` is `[0, 27]`. + fn vested_transfer(l: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `519 + l * (25 ±0) + s * (36 ±0)` + // Estimated: `4764` + // Minimum execution time: 80_026_000 picoseconds. 
+ Weight::from_parts(82_148_674, 0) + .saturating_add(Weight::from_parts(0, 4764)) + // Standard Error: 3_243 + .saturating_add(Weight::from_parts(30_866, 0).saturating_mul(l.into())) + // Standard Error: 5_770 + .saturating_add(Weight::from_parts(99_755, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// The range of component `l` is `[0, 49]`. + /// The range of component `s` is `[0, 27]`. + fn force_vested_transfer(l: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `622 + l * (25 ±0) + s * (36 ±0)` + // Estimated: `6196` + // Minimum execution time: 81_979_000 picoseconds. 
+ Weight::from_parts(83_373_383, 0) + .saturating_add(Weight::from_parts(0, 6196)) + // Standard Error: 3_069 + .saturating_add(Weight::from_parts(49_002, 0).saturating_mul(l.into())) + // Standard Error: 5_460 + .saturating_add(Weight::from_parts(105_265, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// The range of component `l` is `[0, 49]`. + /// The range of component `s` is `[2, 28]`. + fn not_unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `345 + l * (25 ±0) + s * (36 ±0)` + // Estimated: `4764` + // Minimum execution time: 39_190_000 picoseconds. 
+ Weight::from_parts(38_673_517, 0) + .saturating_add(Weight::from_parts(0, 4764)) + // Standard Error: 1_789 + .saturating_add(Weight::from_parts(38_146, 0).saturating_mul(l.into())) + // Standard Error: 3_305 + .saturating_add(Weight::from_parts(97_870, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// The range of component `l` is `[0, 49]`. + /// The range of component `s` is `[2, 28]`. + fn unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `345 + l * (25 ±0) + s * (36 ±0)` + // Estimated: `4764` + // Minimum execution time: 42_229_000 picoseconds. 
+ Weight::from_parts(42_040_081, 0) + .saturating_add(Weight::from_parts(0, 4764)) + // Standard Error: 1_659 + .saturating_add(Weight::from_parts(38_531, 0).saturating_mul(l.into())) + // Standard Error: 3_065 + .saturating_add(Weight::from_parts(76_527, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Vesting::Vesting` (r:1 w:1) + /// Proof: `Vesting::Vesting` (`max_values`: None, `max_size`: Some(1057), added: 3532, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `l` is `[0, 49]`. + /// The range of component `s` is `[2, 28]`. + fn force_remove_vesting_schedule(l: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `519 + l * (25 ±0) + s * (36 ±0)` + // Estimated: `4764` + // Minimum execution time: 46_474_000 picoseconds. 
+ Weight::from_parts(46_105_020, 0) + .saturating_add(Weight::from_parts(0, 4764)) + // Standard Error: 1_706 + .saturating_add(Weight::from_parts(39_879, 0).saturating_mul(l.into())) + // Standard Error: 3_151 + .saturating_add(Weight::from_parts(87_824, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_whitelist.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_whitelist.rs new file mode 100644 index 0000000000000..5a37b7516158e --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_whitelist.rs @@ -0,0 +1,127 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `pallet_whitelist` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/westend-runtime/westend_runtime.wasm +// --pallet=pallet_whitelist +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_whitelist`. +pub struct WeightInfo(PhantomData); +impl pallet_whitelist::WeightInfo for WeightInfo { + /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) + /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + fn whitelist_call() -> Weight { + // Proof Size summary in bytes: + // Measured: `118` + // Estimated: `3556` + // Minimum execution time: 23_699_000 picoseconds. 
+ Weight::from_parts(24_759_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) + /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + fn remove_whitelisted_call() -> Weight { + // Proof Size summary in bytes: + // Measured: `247` + // Estimated: `3556` + // Minimum execution time: 24_245_000 picoseconds. + Weight::from_parts(25_258_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) + /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:1 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `Measured`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 4194294]`. + fn dispatch_whitelisted_call(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `323 + n * (1 ±0)` + // Estimated: `3787 + n * (1 ±0)` + // Minimum execution time: 35_734_000 picoseconds. 
+ Weight::from_parts(36_868_000, 0) + .saturating_add(Weight::from_parts(0, 3787)) + // Standard Error: 344 + .saturating_add(Weight::from_parts(31_003, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) + } + /// Storage: `Whitelist::WhitelistedCall` (r:1 w:1) + /// Proof: `Whitelist::WhitelistedCall` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 10000]`. + fn dispatch_whitelisted_call_with_preimage(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `247` + // Estimated: `3556` + // Minimum execution time: 27_147_000 picoseconds. + Weight::from_parts(28_547_069, 0) + .saturating_add(Weight::from_parts(0, 3556)) + // Standard Error: 7 + .saturating_add(Weight::from_parts(1_405, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_xcm.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_xcm.rs new file mode 100644 index 0000000000000..62ab9710ab35e --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/pallet_xcm.rs @@ -0,0 +1,381 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. 
+ +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `pallet_xcm` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/westend-runtime/westend_runtime.wasm +// --pallet=pallet_xcm +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_xcm`. 
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
(`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn teleport_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `315` + // Estimated: `6196` + // Minimum execution time: 146_685_000 picoseconds. + Weight::from_parts(152_738_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `XcmPallet::ShouldRecordXcm` (r:1 w:0) + /// Proof: `XcmPallet::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:1 w:0) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn reserve_transfer_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `367` + // Estimated: `6196` + // Minimum execution time: 145_601_000 picoseconds. 
+ Weight::from_parts(153_228_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `XcmPallet::ShouldRecordXcm` (r:1 w:0) + /// Proof: `XcmPallet::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:1 w:0) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn transfer_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `315` + // Estimated: `6196` + // Minimum execution time: 148_866_000 picoseconds. + Weight::from_parts(152_293_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `XcmPallet::ShouldRecordXcm` (r:1 w:0) + /// Proof: `XcmPallet::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn execute() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `1485` + // Minimum execution time: 9_855_000 picoseconds. 
+ Weight::from_parts(10_341_000, 0) + .saturating_add(Weight::from_parts(0, 1485)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `XcmPallet::SupportedVersion` (r:0 w:1) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn force_xcm_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 8_483_000 picoseconds. + Weight::from_parts(8_898_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn force_default_xcm_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_570_000 picoseconds. + Weight::from_parts(2_709_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `XcmPallet::VersionNotifiers` (r:1 w:1) + /// Proof: `XcmPallet::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::QueryCounter` (r:1 w:1) + /// Proof: `XcmPallet::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:1 w:0) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::Queries` (r:0 w:1) + /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, 
mode: `Measured`) + fn force_subscribe_version_notify() -> Weight { + // Proof Size summary in bytes: + // Measured: `212` + // Estimated: `3677` + // Minimum execution time: 48_311_000 picoseconds. + Weight::from_parts(49_878_000, 0) + .saturating_add(Weight::from_parts(0, 3677)) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `XcmPallet::VersionNotifiers` (r:1 w:1) + /// Proof: `XcmPallet::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:1 w:0) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::Queries` (r:0 w:1) + /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn force_unsubscribe_version_notify() -> Weight { + // Proof Size summary in bytes: + // Measured: `392` + // Estimated: `3857` + // Minimum execution time: 53_270_000 picoseconds. 
+ Weight::from_parts(55_216_000, 0) + .saturating_add(Weight::from_parts(0, 3857)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `XcmPallet::XcmExecutionSuspended` (r:0 w:1) + /// Proof: `XcmPallet::XcmExecutionSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn force_suspension() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_538_000 picoseconds. + Weight::from_parts(2_717_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `XcmPallet::SupportedVersion` (r:6 w:2) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn migrate_supported_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `22` + // Estimated: `15862` + // Minimum execution time: 22_579_000 picoseconds. + Weight::from_parts(22_973_000, 0) + .saturating_add(Weight::from_parts(0, 15862)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `XcmPallet::VersionNotifiers` (r:6 w:2) + /// Proof: `XcmPallet::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn migrate_version_notifiers() -> Weight { + // Proof Size summary in bytes: + // Measured: `26` + // Estimated: `15866` + // Minimum execution time: 22_803_000 picoseconds. 
+ Weight::from_parts(23_241_000, 0) + .saturating_add(Weight::from_parts(0, 15866)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `XcmPallet::VersionNotifyTargets` (r:7 w:0) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn already_notified_target() -> Weight { + // Proof Size summary in bytes: + // Measured: `40` + // Estimated: `18355` + // Minimum execution time: 25_760_000 picoseconds. + Weight::from_parts(26_511_000, 0) + .saturating_add(Weight::from_parts(0, 18355)) + .saturating_add(T::DbWeight::get().reads(7)) + } + /// Storage: `XcmPallet::VersionNotifyTargets` (r:2 w:1) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:0) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:1 w:0) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn notify_current_targets() -> Weight { + // Proof Size summary in bytes: + // Measured: `211` + // Estimated: `6151` + // Minimum execution time: 36_850_000 picoseconds. 
+ Weight::from_parts(38_086_000, 0) + .saturating_add(Weight::from_parts(0, 6151)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `XcmPallet::VersionNotifyTargets` (r:5 w:0) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn notify_target_migration_fail() -> Weight { + // Proof Size summary in bytes: + // Measured: `40` + // Estimated: `13405` + // Minimum execution time: 17_893_000 picoseconds. + Weight::from_parts(18_327_000, 0) + .saturating_add(Weight::from_parts(0, 13405)) + .saturating_add(T::DbWeight::get().reads(5)) + } + /// Storage: `XcmPallet::VersionNotifyTargets` (r:6 w:2) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn migrate_version_notify_targets() -> Weight { + // Proof Size summary in bytes: + // Measured: `33` + // Estimated: `15873` + // Minimum execution time: 22_807_000 picoseconds. 
+ Weight::from_parts(23_148_000, 0) + .saturating_add(Weight::from_parts(0, 15873)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `XcmPallet::VersionNotifyTargets` (r:6 w:1) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:0) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:1 w:0) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn migrate_and_notify_old_targets() -> Weight { + // Proof Size summary in bytes: + // Measured: `211` + // Estimated: `16051` + // Minimum execution time: 48_558_000 picoseconds. + Weight::from_parts(50_046_000, 0) + .saturating_add(Weight::from_parts(0, 16051)) + .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `XcmPallet::QueryCounter` (r:1 w:1) + /// Proof: `XcmPallet::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::Queries` (r:0 w:1) + /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn new_query() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `1485` + // Minimum execution time: 2_757_000 picoseconds. 
+ Weight::from_parts(2_981_000, 0) + .saturating_add(Weight::from_parts(0, 1485)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `XcmPallet::Queries` (r:1 w:1) + /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn take_response() -> Weight { + // Proof Size summary in bytes: + // Measured: `7576` + // Estimated: `11041` + // Minimum execution time: 27_384_000 picoseconds. + Weight::from_parts(27_705_000, 0) + .saturating_add(Weight::from_parts(0, 11041)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `XcmPallet::ShouldRecordXcm` (r:1 w:0) + /// Proof: `XcmPallet::ShouldRecordXcm` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::AssetTraps` (r:1 w:1) + /// Proof: `XcmPallet::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn claim_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `23` + // Estimated: `3488` + // Minimum execution time: 40_784_000 picoseconds. + Weight::from_parts(42_298_000, 0) + .saturating_add(Weight::from_parts(0, 3488)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn add_authorized_alias() -> Weight { + Weight::from_parts(100_000, 0) + } + fn remove_authorized_alias() -> Weight { + Weight::from_parts(100_000, 0) + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_common_assigned_slots.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_common_assigned_slots.rs new file mode 100644 index 0000000000000..f001aa5074cec --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_common_assigned_slots.rs @@ -0,0 +1,154 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. 
+ +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `polkadot_runtime_common::assigned_slots` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/westend-runtime/westend_runtime.wasm +// --pallet=polkadot_runtime_common::assigned_slots +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `polkadot_runtime_common::assigned_slots`. 
+pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> polkadot_runtime_common::assigned_slots::WeightInfo for WeightInfo<T> { + /// Storage: `Registrar::Paras` (r:1 w:0) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:1) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AssignedSlots::PermanentSlots` (r:1 w:1) + /// Proof: `AssignedSlots::PermanentSlots` (`max_values`: None, `max_size`: Some(20), added: 2495, mode: `MaxEncodedLen`) + /// Storage: `AssignedSlots::TemporarySlots` (r:1 w:0) + /// Proof: `AssignedSlots::TemporarySlots` (`max_values`: None, `max_size`: Some(61), added: 2536, mode: `MaxEncodedLen`) + /// Storage: `Slots::Leases` (r:1 w:1) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AssignedSlots::PermanentSlotCount` (r:1 w:1) + /// Proof: `AssignedSlots::PermanentSlotCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `AssignedSlots::MaxPermanentSlots` (r:1 w:0) + /// Proof: `AssignedSlots::MaxPermanentSlots` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ActionsQueue` (r:1 w:1) + /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn assign_perm_parachain_slot() -> Weight { + // Proof Size summary in bytes: + // Measured: `660` + // Estimated: `4125` + // Minimum execution time: 81_431_000 picoseconds.
+ Weight::from_parts(84_673_000, 0) + .saturating_add(Weight::from_parts(0, 4125)) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `Registrar::Paras` (r:1 w:0) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:1) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AssignedSlots::PermanentSlots` (r:1 w:0) + /// Proof: `AssignedSlots::PermanentSlots` (`max_values`: None, `max_size`: Some(20), added: 2495, mode: `MaxEncodedLen`) + /// Storage: `AssignedSlots::TemporarySlots` (r:1 w:1) + /// Proof: `AssignedSlots::TemporarySlots` (`max_values`: None, `max_size`: Some(61), added: 2536, mode: `MaxEncodedLen`) + /// Storage: `Slots::Leases` (r:1 w:1) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AssignedSlots::TemporarySlotCount` (r:1 w:1) + /// Proof: `AssignedSlots::TemporarySlotCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `AssignedSlots::MaxTemporarySlots` (r:1 w:0) + /// Proof: `AssignedSlots::MaxTemporarySlots` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `AssignedSlots::ActiveTemporarySlotCount` (r:1 w:1) + /// Proof: `AssignedSlots::ActiveTemporarySlotCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ActionsQueue` (r:1 w:1) + /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn assign_temp_parachain_slot() -> Weight { + // Proof Size summary in bytes: + // Measured: `660` + // Estimated: `4125` + // Minimum execution time: 87_650_000 
picoseconds. + Weight::from_parts(91_880_000, 0) + .saturating_add(Weight::from_parts(0, 4125)) + .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().writes(6)) + } + /// Storage: `AssignedSlots::PermanentSlots` (r:1 w:0) + /// Proof: `AssignedSlots::PermanentSlots` (`max_values`: None, `max_size`: Some(20), added: 2495, mode: `MaxEncodedLen`) + /// Storage: `AssignedSlots::TemporarySlots` (r:1 w:1) + /// Proof: `AssignedSlots::TemporarySlots` (`max_values`: None, `max_size`: Some(61), added: 2536, mode: `MaxEncodedLen`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Slots::Leases` (r:1 w:1) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AssignedSlots::TemporarySlotCount` (r:1 w:1) + /// Proof: `AssignedSlots::TemporarySlotCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn unassign_parachain_slot() -> Weight { + // Proof Size summary in bytes: + // Measured: `626` + // Estimated: `4091` + // Minimum execution time: 56_736_000 picoseconds. + Weight::from_parts(60_568_000, 0) + .saturating_add(Weight::from_parts(0, 4091)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `AssignedSlots::MaxPermanentSlots` (r:0 w:1) + /// Proof: `AssignedSlots::MaxPermanentSlots` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn set_max_permanent_slots() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 5_879_000 picoseconds. 
+ Weight::from_parts(6_200_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AssignedSlots::MaxTemporarySlots` (r:0 w:1) + /// Proof: `AssignedSlots::MaxTemporarySlots` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn set_max_temporary_slots() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 5_840_000 picoseconds. + Weight::from_parts(6_113_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_common_auctions.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_common_auctions.rs new file mode 100644 index 0000000000000..206e5ad7620de --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_common_auctions.rs @@ -0,0 +1,142 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `polkadot_runtime_common::auctions` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! 
HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/westend-runtime/westend_runtime.wasm +// --pallet=polkadot_runtime_common::auctions +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `polkadot_runtime_common::auctions`. +pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> polkadot_runtime_common::auctions::WeightInfo for WeightInfo<T> { + /// Storage: `Auctions::AuctionInfo` (r:1 w:1) + /// Proof: `Auctions::AuctionInfo` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Auctions::AuctionCounter` (r:1 w:1) + /// Proof: `Auctions::AuctionCounter` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn new_auction() -> Weight { + // Proof Size summary in bytes: + // Measured: `4` + // Estimated: `1493` + // Minimum execution time: 9_527_000 picoseconds.
+ Weight::from_parts(9_961_000, 0) + .saturating_add(Weight::from_parts(0, 1493)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Auctions::AuctionCounter` (r:1 w:0) + /// Proof: `Auctions::AuctionCounter` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Auctions::AuctionInfo` (r:1 w:0) + /// Proof: `Auctions::AuctionInfo` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Slots::Leases` (r:1 w:0) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Auctions::Winning` (r:1 w:1) + /// Proof: `Auctions::Winning` (`max_values`: None, `max_size`: Some(1920), added: 4395, mode: `MaxEncodedLen`) + /// Storage: `Auctions::ReservedAmounts` (r:2 w:2) + /// Proof: `Auctions::ReservedAmounts` (`max_values`: None, `max_size`: Some(60), added: 2535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn bid() -> Weight { + // Proof Size summary in bytes: + // Measured: `767` + // Estimated: `6060` + // Minimum execution time: 85_758_000 picoseconds. 
+ Weight::from_parts(91_279_000, 0) + .saturating_add(Weight::from_parts(0, 6060)) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Auctions::AuctionInfo` (r:1 w:1) + /// Proof: `Auctions::AuctionInfo` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Babe::NextRandomness` (r:1 w:0) + /// Proof: `Babe::NextRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `Babe::EpochStart` (r:1 w:0) + /// Proof: `Babe::EpochStart` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Auctions::AuctionCounter` (r:1 w:0) + /// Proof: `Auctions::AuctionCounter` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Auctions::Winning` (r:3600 w:3600) + /// Proof: `Auctions::Winning` (`max_values`: None, `max_size`: Some(1920), added: 4395, mode: `MaxEncodedLen`) + /// Storage: `Auctions::ReservedAmounts` (r:37 w:36) + /// Proof: `Auctions::ReservedAmounts` (`max_values`: None, `max_size`: Some(60), added: 2535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:36 w:36) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Slots::Leases` (r:7 w:7) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:1) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ActionsQueue` (r:1 w:1) + /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn on_initialize() -> Weight { + // Proof Size summary in bytes: + // Measured: 
`6947156` + // Estimated: `15822990` + // Minimum execution time: 8_791_793_000 picoseconds. + Weight::from_parts(8_956_541_000, 0) + .saturating_add(Weight::from_parts(0, 15822990)) + .saturating_add(T::DbWeight::get().reads(3687)) + .saturating_add(T::DbWeight::get().writes(3682)) + } + /// Storage: `Auctions::ReservedAmounts` (r:37 w:36) + /// Proof: `Auctions::ReservedAmounts` (`max_values`: None, `max_size`: Some(60), added: 2535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:36 w:36) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Auctions::Winning` (r:3600 w:3600) + /// Proof: `Auctions::Winning` (`max_values`: None, `max_size`: Some(1920), added: 4395, mode: `MaxEncodedLen`) + /// Storage: `Auctions::AuctionInfo` (r:0 w:1) + /// Proof: `Auctions::AuctionInfo` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + fn cancel_auction() -> Weight { + // Proof Size summary in bytes: + // Measured: `177903` + // Estimated: `15822990` + // Minimum execution time: 6_680_245_000 picoseconds. + Weight::from_parts(6_821_440_000, 0) + .saturating_add(Weight::from_parts(0, 15822990)) + .saturating_add(T::DbWeight::get().reads(3673)) + .saturating_add(T::DbWeight::get().writes(3673)) + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_common_crowdloan.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_common_crowdloan.rs new file mode 100644 index 0000000000000..161acac68399a --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_common_crowdloan.rs @@ -0,0 +1,222 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. 
+ +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `polkadot_runtime_common::crowdloan` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/westend-runtime/westend_runtime.wasm +// --pallet=polkadot_runtime_common::crowdloan +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `polkadot_runtime_common::crowdloan`. 
+pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> polkadot_runtime_common::crowdloan::WeightInfo for WeightInfo<T> { + /// Storage: `Crowdloan::Funds` (r:1 w:1) + /// Proof: `Crowdloan::Funds` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Registrar::Paras` (r:1 w:0) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Crowdloan::NextFundIndex` (r:1 w:1) + /// Proof: `Crowdloan::NextFundIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn create() -> Weight { + // Proof Size summary in bytes: + // Measured: `400` + // Estimated: `3865` + // Minimum execution time: 76_586_000 picoseconds. + Weight::from_parts(81_924_000, 0) + .saturating_add(Weight::from_parts(0, 3865)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Crowdloan::Funds` (r:1 w:1) + /// Proof: `Crowdloan::Funds` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Slots::Leases` (r:1 w:0) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Auctions::AuctionInfo` (r:1 w:0) + /// Proof: `Auctions::AuctionInfo` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Crowdloan::EndingsCount` (r:1 w:0) + /// Proof: `Crowdloan::EndingsCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Crowdloan::NewRaise` (r:1 w:1) + /// Proof: `Crowdloan::NewRaise` (`max_values`: Some(1),
`max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xd861ea1ebf4800d4b89f4ff787ad79ee96d9a708c85b57da7eb8f9ddeda61291` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xd861ea1ebf4800d4b89f4ff787ad79ee96d9a708c85b57da7eb8f9ddeda61291` (r:1 w:1) + fn contribute() -> Weight { + // Proof Size summary in bytes: + // Measured: `530` + // Estimated: `3995` + // Minimum execution time: 160_999_000 picoseconds. + Weight::from_parts(168_997_000, 0) + .saturating_add(Weight::from_parts(0, 3995)) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Crowdloan::Funds` (r:1 w:1) + /// Proof: `Crowdloan::Funds` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xc85982571aa615c788ef9b2c16f54f25773fd439e8ee1ed2aa3ae43d48e880f0` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xc85982571aa615c788ef9b2c16f54f25773fd439e8ee1ed2aa3ae43d48e880f0` (r:1 w:1) + fn withdraw() -> Weight { + // Proof Size summary in bytes: + // Measured: `687` + // Estimated: `6196` + // Minimum execution time: 108_145_000 picoseconds. + Weight::from_parts(114_236_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `k` is `[0, 500]`. + fn refund(k: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `316 + k * (189 ±0)` + // Estimated: `325 + k * (190 ±0)` + // Minimum execution time: 60_609_000 picoseconds. 
+ Weight::from_parts(63_614_000, 0) + .saturating_add(Weight::from_parts(0, 325)) + // Standard Error: 17_663 + .saturating_add(Weight::from_parts(45_128_820, 0).saturating_mul(k.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(k.into()))) + .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(k.into()))) + .saturating_add(Weight::from_parts(0, 190).saturating_mul(k.into())) + } + /// Storage: `Crowdloan::Funds` (r:1 w:1) + /// Proof: `Crowdloan::Funds` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn dissolve() -> Weight { + // Proof Size summary in bytes: + // Measured: `514` + // Estimated: `6196` + // Minimum execution time: 69_115_000 picoseconds. + Weight::from_parts(73_853_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Crowdloan::Funds` (r:1 w:1) + /// Proof: `Crowdloan::Funds` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn edit() -> Weight { + // Proof Size summary in bytes: + // Measured: `234` + // Estimated: `3699` + // Minimum execution time: 37_142_000 picoseconds. 
+ Weight::from_parts(39_142_000, 0) + .saturating_add(Weight::from_parts(0, 3699)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Crowdloan::Funds` (r:1 w:0) + /// Proof: `Crowdloan::Funds` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xd861ea1ebf4800d4b89f4ff787ad79ee96d9a708c85b57da7eb8f9ddeda61291` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xd861ea1ebf4800d4b89f4ff787ad79ee96d9a708c85b57da7eb8f9ddeda61291` (r:1 w:1) + fn add_memo() -> Weight { + // Proof Size summary in bytes: + // Measured: `412` + // Estimated: `3877` + // Minimum execution time: 41_654_000 picoseconds. + Weight::from_parts(44_008_000, 0) + .saturating_add(Weight::from_parts(0, 3877)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Crowdloan::Funds` (r:1 w:0) + /// Proof: `Crowdloan::Funds` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Crowdloan::NewRaise` (r:1 w:1) + /// Proof: `Crowdloan::NewRaise` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn poke() -> Weight { + // Proof Size summary in bytes: + // Measured: `238` + // Estimated: `3703` + // Minimum execution time: 28_066_000 picoseconds. 
+ Weight::from_parts(30_228_000, 0) + .saturating_add(Weight::from_parts(0, 3703)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Auctions::AuctionInfo` (r:1 w:0) + /// Proof: `Auctions::AuctionInfo` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Crowdloan::EndingsCount` (r:1 w:1) + /// Proof: `Crowdloan::EndingsCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Crowdloan::NewRaise` (r:1 w:1) + /// Proof: `Crowdloan::NewRaise` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Crowdloan::Funds` (r:100 w:0) + /// Proof: `Crowdloan::Funds` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Auctions::AuctionCounter` (r:1 w:0) + /// Proof: `Auctions::AuctionCounter` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Paras::ParaLifecycles` (r:100 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Slots::Leases` (r:100 w:0) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Auctions::Winning` (r:1 w:1) + /// Proof: `Auctions::Winning` (`max_values`: None, `max_size`: Some(1920), added: 4395, mode: `MaxEncodedLen`) + /// Storage: `Auctions::ReservedAmounts` (r:100 w:100) + /// Proof: `Auctions::ReservedAmounts` (`max_values`: None, `max_size`: Some(60), added: 2535, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:100 w:100) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `n` is `[2, 100]`. + fn on_initialize(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `233 + n * (360 ±0)` + // Estimated: `5385 + n * (2835 ±0)` + // Minimum execution time: 154_127_000 picoseconds. 
+ Weight::from_parts(19_026_689, 0) + .saturating_add(Weight::from_parts(0, 5385)) + // Standard Error: 57_680 + .saturating_add(Weight::from_parts(65_569_682, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2835).saturating_mul(n.into())) + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_common_identity_migrator.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_common_identity_migrator.rs new file mode 100644 index 0000000000000..e543da4aaddbe --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_common_identity_migrator.rs @@ -0,0 +1,107 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `polkadot_runtime_common::identity_migrator` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/westend-runtime/westend_runtime.wasm +// --pallet=polkadot_runtime_common::identity_migrator +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `polkadot_runtime_common::identity_migrator`. +pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> polkadot_runtime_common::identity_migrator::WeightInfo for WeightInfo<T> { + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:1 w:0) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode:
`Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Identity::SuperOf` (r:0 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// The range of component `r` is `[0, 20]`. + /// The range of component `s` is `[0, 100]`. + fn reap_identity(r: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `7488 + r * (5 ±0) + s * (32 ±0)` + // Estimated: `11003 + r * (7 ±0) + s * (32 ±0)` + // Minimum execution time: 182_585_000 picoseconds. + Weight::from_parts(189_454_318, 0) + .saturating_add(Weight::from_parts(0, 11003)) + // Standard Error: 22_079 + .saturating_add(Weight::from_parts(181_699, 0).saturating_mul(r.into())) + // Standard Error: 4_522 + .saturating_add(Weight::from_parts(1_520_911, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(6)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) + .saturating_add(Weight::from_parts(0, 7).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 32).saturating_mul(s.into())) + } + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + fn poke_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `7241` + // Estimated: `11003` + // Minimum execution time: 124_068_000 picoseconds. 
+ Weight::from_parts(125_776_000, 0) + .saturating_add(Weight::from_parts(0, 11003)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_common_paras_registrar.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_common_paras_registrar.rs new file mode 100644 index 0000000000000..0f3bef68f8c22 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_common_paras_registrar.rs @@ -0,0 +1,227 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `polkadot_runtime_common::paras_registrar` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/westend-runtime/westend_runtime.wasm +// --pallet=polkadot_runtime_common::paras_registrar +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `polkadot_runtime_common::paras_registrar`. +pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> polkadot_runtime_common::paras_registrar::WeightInfo for WeightInfo<T> { + /// Storage: `Registrar::NextFreeParaId` (r:1 w:1) + /// Proof: `Registrar::NextFreeParaId` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Registrar::Paras` (r:1 w:1) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn reserve() -> Weight { + // Proof Size summary in bytes: + // Measured: `59` + // Estimated: `3524` + // Minimum execution time: 33_477_000 picoseconds. 
+ Weight::from_parts(34_970_000, 0) + .saturating_add(Weight::from_parts(0, 3524)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Registrar::Paras` (r:1 w:1) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:1) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHash` (r:1 w:1) + /// Proof: `Paras::CodeByHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteList` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHashRefs` (r:1 w:1) + /// Proof: `Paras::CodeByHashRefs` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CurrentCodeHash` (r:0 w:1) + /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpcomingParasGenesis` (r:0 w:1) + /// Proof: `Paras::UpcomingParasGenesis` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn register() -> Weight { + // Proof Size summary in bytes: + // Measured: `315` + // Estimated: `3780` + // Minimum execution time: 53_331_984_000 picoseconds. 
+ Weight::from_parts(53_731_706_000, 0) + .saturating_add(Weight::from_parts(0, 3780)) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(8)) + } + /// Storage: `Registrar::Paras` (r:1 w:1) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:1) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHash` (r:1 w:1) + /// Proof: `Paras::CodeByHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteList` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHashRefs` (r:1 w:1) + /// Proof: `Paras::CodeByHashRefs` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CurrentCodeHash` (r:0 w:1) + /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpcomingParasGenesis` (r:0 w:1) + /// Proof: `Paras::UpcomingParasGenesis` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn force_register() -> Weight { + // Proof Size summary in bytes: + // Measured: `232` + // Estimated: `3697` + // Minimum execution time: 53_277_823_000 picoseconds. 
+ Weight::from_parts(53_476_481_000, 0) + .saturating_add(Weight::from_parts(0, 3697)) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(8)) + } + /// Storage: `Registrar::Paras` (r:1 w:1) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:1) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeHash` (r:1 w:0) + /// Proof: `Paras::FutureCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ActionsQueue` (r:1 w:1) + /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `Registrar::PendingSwap` (r:0 w:1) + /// Proof: `Registrar::PendingSwap` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn deregister() -> Weight { + // Proof Size summary in bytes: + // Measured: `462` + // Estimated: `3927` + // Minimum execution time: 64_109_000 picoseconds. 
+ Weight::from_parts(70_181_000, 0) + .saturating_add(Weight::from_parts(0, 3927)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Registrar::Paras` (r:1 w:0) + /// Proof: `Registrar::Paras` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:2 w:2) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Registrar::PendingSwap` (r:1 w:1) + /// Proof: `Registrar::PendingSwap` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ActionsQueue` (r:1 w:1) + /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Crowdloan::Funds` (r:2 w:2) + /// Proof: `Crowdloan::Funds` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Slots::Leases` (r:2 w:2) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:0) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:1 w:0) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn swap() -> Weight { + // Proof Size summary in bytes: + // Measured: `863` + // Estimated: `6803` + // Minimum execution time: 113_721_000 picoseconds. 
+ Weight::from_parts(119_828_000, 0) + .saturating_add(Weight::from_parts(0, 6803)) + .saturating_add(T::DbWeight::get().reads(14)) + .saturating_add(T::DbWeight::get().writes(8)) + } + /// Storage: `Paras::FutureCodeHash` (r:1 w:1) + /// Proof: `Paras::FutureCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeRestrictionSignal` (r:1 w:1) + /// Proof: `Paras::UpgradeRestrictionSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CurrentCodeHash` (r:1 w:0) + /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeCooldowns` (r:1 w:1) + /// Proof: `Paras::UpgradeCooldowns` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHash` (r:1 w:1) + /// Proof: `Paras::CodeByHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteList` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHashRefs` (r:1 w:1) + /// Proof: `Paras::CodeByHashRefs` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `b` is `[9, 3145728]`. + fn schedule_code_upgrade(b: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `201` + // Estimated: `3666` + // Minimum execution time: 44_578_000 picoseconds. 
+ Weight::from_parts(45_244_000, 0) + .saturating_add(Weight::from_parts(0, 3666)) + // Standard Error: 130 + .saturating_add(Weight::from_parts(13_412, 0).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(7)) + } + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `b` is `[1, 1048576]`. + fn set_current_head(b: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_410_000 picoseconds. + Weight::from_parts(7_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 42 + .saturating_add(Weight::from_parts(4_560, 0).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_common_slots.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_common_slots.rs new file mode 100644 index 0000000000000..d722494154216 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_common_slots.rs @@ -0,0 +1,132 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `polkadot_runtime_common::slots` +//! 
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/westend-runtime/westend_runtime.wasm +// --pallet=polkadot_runtime_common::slots +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `polkadot_runtime_common::slots`. +pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> polkadot_runtime_common::slots::WeightInfo for WeightInfo<T> { + /// Storage: `Slots::Leases` (r:1 w:1) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn force_lease() -> Weight { + // Proof Size summary in bytes: + // Measured: `287` + // Estimated: `3752` + // Minimum execution time: 30_895_000 picoseconds. 
+ Weight::from_parts(32_407_000, 0) + .saturating_add(Weight::from_parts(0, 3752)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Paras::Parachains` (r:1 w:0) + /// Proof: `Paras::Parachains` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Slots::Leases` (r:101 w:100) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:200 w:200) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ActionsQueue` (r:1 w:1) + /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `c` is `[0, 100]`. + /// The range of component `t` is `[0, 100]`. + fn manage_lease_period_start(c: u32, t: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `561 + c * (20 ±0) + t * (234 ±0)` + // Estimated: `4032 + c * (2496 ±0) + t * (2709 ±0)` + // Minimum execution time: 864_249_000 picoseconds. 
+ Weight::from_parts(871_593_000, 0) + .saturating_add(Weight::from_parts(0, 4032)) + // Standard Error: 106_900 + .saturating_add(Weight::from_parts(3_456_149, 0).saturating_mul(c.into())) + // Standard Error: 106_900 + .saturating_add(Weight::from_parts(10_757_149, 0).saturating_mul(t.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(c.into()))) + .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(t.into()))) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(c.into()))) + .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(t.into()))) + .saturating_add(Weight::from_parts(0, 2496).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(0, 2709).saturating_mul(t.into())) + } + /// Storage: `Slots::Leases` (r:1 w:1) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:8 w:8) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn clear_all_leases() -> Weight { + // Proof Size summary in bytes: + // Measured: `2759` + // Estimated: `21814` + // Minimum execution time: 151_565_000 picoseconds. 
+ Weight::from_parts(158_584_000, 0) + .saturating_add(Weight::from_parts(0, 21814)) + .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().writes(9)) + } + /// Storage: `Slots::Leases` (r:1 w:0) + /// Proof: `Slots::Leases` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:1) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ActionsQueue` (r:1 w:1) + /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn trigger_onboard() -> Weight { + // Proof Size summary in bytes: + // Measured: `579` + // Estimated: `4044` + // Minimum execution time: 43_389_000 picoseconds. + Weight::from_parts(46_029_000, 0) + .saturating_add(Weight::from_parts(0, 4044)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_parachains_configuration.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_parachains_configuration.rs new file mode 100644 index 0000000000000..9958eb0311134 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_parachains_configuration.rs @@ -0,0 +1,192 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `polkadot_runtime_parachains::configuration` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/westend-runtime/westend_runtime.wasm +// --pallet=polkadot_runtime_parachains::configuration +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `polkadot_runtime_parachains::configuration`. 
+pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> polkadot_runtime_parachains::configuration::WeightInfo for WeightInfo<T> { + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn set_config_with_block_number() -> Weight { + // Proof Size summary in bytes: + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 10_475_000 picoseconds. + Weight::from_parts(11_132_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn set_config_with_u32() -> Weight { + // Proof Size summary in bytes: + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 10_419_000 picoseconds. 
+ Weight::from_parts(11_052_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn set_config_with_option_u32() -> Weight { + // Proof Size summary in bytes: + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 10_613_000 picoseconds. + Weight::from_parts(11_170_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn set_hrmp_open_request_ttl() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_000_000_000_000 picoseconds. 
+ Weight::from_parts(2_000_000_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn set_config_with_balance() -> Weight { + // Proof Size summary in bytes: + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 10_818_000 picoseconds. + Weight::from_parts(11_183_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn set_config_with_executor_params() -> Weight { + // Proof Size summary in bytes: + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 12_672_000 picoseconds. 
+ Weight::from_parts(13_154_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn set_config_with_perbill() -> Weight { + // Proof Size summary in bytes: + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 10_814_000 picoseconds. + Weight::from_parts(11_120_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn set_node_feature() -> Weight { + // Proof Size summary in bytes: + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 12_919_000 picoseconds. 
+ Weight::from_parts(13_455_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn set_config_with_scheduler_params() -> Weight { + // Proof Size summary in bytes: + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 10_495_000 picoseconds. + Weight::from_parts(11_189_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_parachains_coretime.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_parachains_coretime.rs new file mode 100644 index 0000000000000..8b661c7f62316 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_parachains_coretime.rs @@ -0,0 +1,123 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `polkadot_runtime_parachains::coretime` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/westend-runtime/westend_runtime.wasm +// --pallet=polkadot_runtime_parachains::coretime +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `polkadot_runtime_parachains::coretime`. 
+pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> polkadot_runtime_parachains::coretime::WeightInfo for WeightInfo<T> { + /// Storage: `OnDemandAssignmentProvider::Revenue` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::Revenue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:1 w:0) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn request_revenue_at() -> Weight { + // Proof Size summary in bytes: + // Measured: `3083` + // Estimated: `6548` + // Minimum execution time: 99_512_000 picoseconds. 
+ Weight::from_parts(103_004_000, 0) + .saturating_add(Weight::from_parts(0, 6548)) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn request_core_count() -> Weight { + // Proof Size summary in bytes: + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 10_672_000 picoseconds. + Weight::from_parts(11_107_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreSchedules` (r:0 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreSchedules` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `s` is `[1, 100]`. + fn assign_core(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `147` + // Estimated: `3612` + // Minimum execution time: 15_678_000 picoseconds. 
+ Weight::from_parts(16_437_686, 0) + .saturating_add(Weight::from_parts(0, 3612)) + // Standard Error: 342 + .saturating_add(Weight::from_parts(10_353, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `OnDemandAssignmentProvider::Credits` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::Credits` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn credit_account() -> Weight { + // Proof Size summary in bytes: + // Measured: `142` + // Estimated: `3607` + // Minimum execution time: 10_043_000 picoseconds. + Weight::from_parts(10_441_000, 0) + .saturating_add(Weight::from_parts(0, 3607)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_parachains_disputes.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_parachains_disputes.rs new file mode 100644 index 0000000000000..918f721ad2031 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_parachains_disputes.rs @@ -0,0 +1,65 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `polkadot_runtime_parachains::disputes` +//! +//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/westend-runtime/westend_runtime.wasm +// --pallet=polkadot_runtime_parachains::disputes +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `polkadot_runtime_parachains::disputes`. +pub struct WeightInfo(PhantomData); +impl polkadot_runtime_parachains::disputes::WeightInfo for WeightInfo { + /// Storage: `ParasDisputes::Frozen` (r:0 w:1) + /// Proof: `ParasDisputes::Frozen` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn force_unfreeze() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_498_000 picoseconds. 
+ Weight::from_parts(2_743_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_parachains_disputes_slashing.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_parachains_disputes_slashing.rs new file mode 100644 index 0000000000000..772c72eaea80f --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_parachains_disputes_slashing.rs @@ -0,0 +1,96 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `polkadot_runtime_parachains::disputes::slashing` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/westend-runtime/westend_runtime.wasm +// --pallet=polkadot_runtime_parachains::disputes::slashing +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `polkadot_runtime_parachains::disputes::slashing`. +pub struct WeightInfo(PhantomData); +impl polkadot_runtime_parachains::disputes::slashing::WeightInfo for WeightInfo { + /// Storage: `Session::CurrentIndex` (r:1 w:0) + /// Proof: `Session::CurrentIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Historical::HistoricalSessions` (r:1 w:0) + /// Proof: `Historical::HistoricalSessions` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `ParasSlashing::UnappliedSlashes` (r:1 w:1) + /// Proof: `ParasSlashing::UnappliedSlashes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Offences::ConcurrentReportsIndex` (r:1 w:1) + /// Proof: `Offences::ConcurrentReportsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Offences::Reports` (r:1 w:1) + /// Proof: `Offences::Reports` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ActiveEra` (r:1 w:0) + /// Proof: `Staking::ActiveEra` (`max_values`: Some(1), `max_size`: Some(13), added: 508, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStartSessionIndex` (r:1 w:0) + /// Proof: 
`Staking::ErasStartSessionIndex` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Staking::Invulnerables` (r:1 w:0) + /// Proof: `Staking::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStakersOverview` (r:1 w:0) + /// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) + /// Storage: `Session::DisabledValidators` (r:1 w:1) + /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Session::Validators` (r:1 w:0) + /// Proof: `Session::Validators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ValidatorSlashInEra` (r:1 w:1) + /// Proof: `Staking::ValidatorSlashInEra` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::OffenceQueue` (r:1 w:1) + /// Proof: `Staking::OffenceQueue` (`max_values`: None, `max_size`: Some(101), added: 2576, mode: `MaxEncodedLen`) + /// Storage: `Staking::OffenceQueueEras` (r:1 w:1) + /// Proof: `Staking::OffenceQueueEras` (`max_values`: Some(1), `max_size`: Some(9), added: 504, mode: `MaxEncodedLen`) + /// The range of component `n` is `[4, 300]`. + fn report_dispute_lost_unsigned(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `2024 + n * (33 ±0)` + // Estimated: `5386 + n * (34 ±0)` + // Minimum execution time: 88_786_000 picoseconds. 
+ Weight::from_parts(127_346_367, 0) + .saturating_add(Weight::from_parts(0, 5386)) + // Standard Error: 3_530 + .saturating_add(Weight::from_parts(144_389, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(14)) + .saturating_add(T::DbWeight::get().writes(7)) + .saturating_add(Weight::from_parts(0, 34).saturating_mul(n.into())) + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_parachains_hrmp.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_parachains_hrmp.rs new file mode 100644 index 0000000000000..ee82df846da7a --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_parachains_hrmp.rs @@ -0,0 +1,384 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `polkadot_runtime_parachains::hrmp` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/westend-runtime/westend_runtime.wasm +// --pallet=polkadot_runtime_parachains::hrmp +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `polkadot_runtime_parachains::hrmp`. +pub struct WeightInfo(PhantomData); +impl polkadot_runtime_parachains::hrmp::WeightInfo for WeightInfo { + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequests` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannels` (r:1 w:0) + /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpEgressChannelsIndex` (r:1 w:0) + /// Proof: `Hrmp::HrmpEgressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequestCount` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequestsList` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: 
`Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:1 w:0) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn hrmp_init_open_channel() -> Weight { + // Proof Size summary in bytes: + // Measured: `477` + // Estimated: `3942` + // Minimum execution time: 56_211_000 picoseconds. + Weight::from_parts(57_621_000, 0) + .saturating_add(Weight::from_parts(0, 3942)) + .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `Hrmp::HrmpOpenChannelRequests` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpIngressChannelsIndex` (r:1 w:0) + /// Proof: `Hrmp::HrmpIngressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpAcceptedChannelRequestCount` (r:1 w:1) + /// Proof: `Hrmp::HrmpAcceptedChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:1 w:0) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn hrmp_accept_open_channel() -> Weight { + // Proof Size summary in bytes: + // Measured: `689` + // 
Estimated: `4154` + // Minimum execution time: 53_758_000 picoseconds. + Weight::from_parts(56_592_000, 0) + .saturating_add(Weight::from_parts(0, 4154)) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Hrmp::HrmpChannels` (r:1 w:0) + /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpCloseChannelRequests` (r:1 w:1) + /// Proof: `Hrmp::HrmpCloseChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpCloseChannelRequestsList` (r:1 w:1) + /// Proof: `Hrmp::HrmpCloseChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:1 w:0) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn hrmp_close_channel() -> Weight { + // Proof Size summary in bytes: + // Measured: `802` + // Estimated: `4267` + // Minimum execution time: 55_327_000 picoseconds. 
+ Weight::from_parts(57_050_000, 0) + .saturating_add(Weight::from_parts(0, 4267)) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Hrmp::HrmpIngressChannelsIndex` (r:128 w:128) + /// Proof: `Hrmp::HrmpIngressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpEgressChannelsIndex` (r:128 w:128) + /// Proof: `Hrmp::HrmpEgressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannels` (r:254 w:254) + /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpAcceptedChannelRequestCount` (r:0 w:1) + /// Proof: `Hrmp::HrmpAcceptedChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannelContents` (r:0 w:254) + /// Proof: `Hrmp::HrmpChannelContents` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequestCount` (r:0 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `i` is `[0, 127]`. + /// The range of component `e` is `[0, 127]`. + fn force_clean_hrmp(i: u32, e: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `264 + e * (100 ±0) + i * (100 ±0)` + // Estimated: `3726 + e * (2575 ±0) + i * (2575 ±0)` + // Minimum execution time: 1_559_120_000 picoseconds. 
+ Weight::from_parts(1_571_304_000, 0) + .saturating_add(Weight::from_parts(0, 3726)) + // Standard Error: 144_726 + .saturating_add(Weight::from_parts(4_485_208, 0).saturating_mul(i.into())) + // Standard Error: 144_726 + .saturating_add(Weight::from_parts(4_575_293, 0).saturating_mul(e.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(i.into()))) + .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(e.into()))) + .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(i.into()))) + .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(e.into()))) + .saturating_add(Weight::from_parts(0, 2575).saturating_mul(e.into())) + .saturating_add(Weight::from_parts(0, 2575).saturating_mul(i.into())) + } + /// Storage: `Hrmp::HrmpOpenChannelRequestsList` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequests` (r:128 w:128) + /// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:256 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpIngressChannelsIndex` (r:128 w:128) + /// Proof: `Hrmp::HrmpIngressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpEgressChannelsIndex` (r:128 w:128) + /// Proof: `Hrmp::HrmpEgressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequestCount` (r:128 w:128) + /// Proof: `Hrmp::HrmpOpenChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpAcceptedChannelRequestCount` (r:128 w:128) + /// Proof: `Hrmp::HrmpAcceptedChannelRequestCount` (`max_values`: None, `max_size`: None, mode: 
`Measured`) + /// Storage: `Hrmp::HrmpChannels` (r:0 w:128) + /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `c` is `[0, 128]`. + fn force_process_hrmp_open(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `492 + c * (136 ±0)` + // Estimated: `1947 + c * (5086 ±0)` + // Minimum execution time: 11_281_000 picoseconds. + Weight::from_parts(11_631_000, 0) + .saturating_add(Weight::from_parts(0, 1947)) + // Standard Error: 20_254 + .saturating_add(Weight::from_parts(27_481_196, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads((7_u64).saturating_mul(c.into()))) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(T::DbWeight::get().writes((6_u64).saturating_mul(c.into()))) + .saturating_add(Weight::from_parts(0, 5086).saturating_mul(c.into())) + } + /// Storage: `Hrmp::HrmpCloseChannelRequestsList` (r:1 w:1) + /// Proof: `Hrmp::HrmpCloseChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannels` (r:128 w:128) + /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpEgressChannelsIndex` (r:128 w:128) + /// Proof: `Hrmp::HrmpEgressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpIngressChannelsIndex` (r:128 w:128) + /// Proof: `Hrmp::HrmpIngressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpCloseChannelRequests` (r:0 w:128) + /// Proof: `Hrmp::HrmpCloseChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannelContents` (r:0 w:128) + /// Proof: `Hrmp::HrmpChannelContents` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `c` is `[0, 128]`. 
+ fn force_process_hrmp_close(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `335 + c * (124 ±0)` + // Estimated: `1795 + c * (2600 ±0)` + // Minimum execution time: 10_044_000 picoseconds. + Weight::from_parts(10_289_000, 0) + .saturating_add(Weight::from_parts(0, 1795)) + // Standard Error: 14_359 + .saturating_add(Weight::from_parts(16_831_230, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(c.into()))) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(T::DbWeight::get().writes((5_u64).saturating_mul(c.into()))) + .saturating_add(Weight::from_parts(0, 2600).saturating_mul(c.into())) + } + /// Storage: `Hrmp::HrmpOpenChannelRequestsList` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequests` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequestCount` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `c` is `[0, 128]`. + fn hrmp_cancel_open_request(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1026 + c * (13 ±0)` + // Estimated: `4295 + c * (15 ±0)` + // Minimum execution time: 25_372_000 picoseconds. 
+ Weight::from_parts(31_619_390, 0) + .saturating_add(Weight::from_parts(0, 4295)) + // Standard Error: 3_058 + .saturating_add(Weight::from_parts(234_494, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(Weight::from_parts(0, 15).saturating_mul(c.into())) + } + /// Storage: `Hrmp::HrmpOpenChannelRequestsList` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequests` (r:128 w:128) + /// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `c` is `[0, 128]`. + fn clean_open_channel_requests(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `243 + c * (63 ±0)` + // Estimated: `1722 + c * (2538 ±0)` + // Minimum execution time: 8_088_000 picoseconds. + Weight::from_parts(2_136_112, 0) + .saturating_add(Weight::from_parts(0, 1722)) + // Standard Error: 7_192 + .saturating_add(Weight::from_parts(4_626_808, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(c.into()))) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(c.into()))) + .saturating_add(Weight::from_parts(0, 2538).saturating_mul(c.into())) + } + /// Storage: `Hrmp::HrmpOpenChannelRequests` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequestsList` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequestCount` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` 
(r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannels` (r:1 w:0) + /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpEgressChannelsIndex` (r:1 w:0) + /// Proof: `Hrmp::HrmpEgressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:2 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:2 w:2) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:2 w:0) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:2 w:2) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpIngressChannelsIndex` (r:1 w:0) + /// Proof: `Hrmp::HrmpIngressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpAcceptedChannelRequestCount` (r:1 w:1) + /// Proof: `Hrmp::HrmpAcceptedChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `c` is `[0, 1]`. + fn force_open_hrmp_channel(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `477 + c * (235 ±0)` + // Estimated: `6417 + c * (235 ±0)` + // Minimum execution time: 79_061_000 picoseconds. 
+ Weight::from_parts(83_104_559, 0) + .saturating_add(Weight::from_parts(0, 6417)) + // Standard Error: 325_060 + .saturating_add(Weight::from_parts(16_143_240, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(16)) + .saturating_add(T::DbWeight::get().writes(8)) + .saturating_add(Weight::from_parts(0, 235).saturating_mul(c.into())) + } + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequests` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannels` (r:1 w:0) + /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpEgressChannelsIndex` (r:1 w:0) + /// Proof: `Hrmp::HrmpEgressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequestCount` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequestsList` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:2 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:2 w:2) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:2 w:0) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:2 w:2) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpIngressChannelsIndex` (r:1 w:0) + /// Proof: `Hrmp::HrmpIngressChannelsIndex` 
(`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpAcceptedChannelRequestCount` (r:1 w:1) + /// Proof: `Hrmp::HrmpAcceptedChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn establish_system_channel() -> Weight { + // Proof Size summary in bytes: + // Measured: `477` + // Estimated: `6417` + // Minimum execution time: 80_071_000 picoseconds. + Weight::from_parts(83_317_000, 0) + .saturating_add(Weight::from_parts(0, 6417)) + .saturating_add(T::DbWeight::get().reads(16)) + .saturating_add(T::DbWeight::get().writes(8)) + } + /// Storage: `Hrmp::HrmpChannels` (r:1 w:1) + /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn poke_channel_deposits() -> Weight { + // Proof Size summary in bytes: + // Measured: `263` + // Estimated: `3728` + // Minimum execution time: 18_706_000 picoseconds. + Weight::from_parts(19_748_000, 0) + .saturating_add(Weight::from_parts(0, 3728)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Paras::ParaLifecycles` (r:2 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequests` (r:2 w:2) + /// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannels` (r:2 w:0) + /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpEgressChannelsIndex` (r:2 w:0) + /// Proof: `Hrmp::HrmpEgressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequestCount` (r:2 w:2) + /// Proof: `Hrmp::HrmpOpenChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpOpenChannelRequestsList` (r:1 w:1) + /// Proof: `Hrmp::HrmpOpenChannelRequestsList` (`max_values`: Some(1), `max_size`: None, 
mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:2 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:2 w:2) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:2 w:0) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:2 w:2) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpIngressChannelsIndex` (r:2 w:0) + /// Proof: `Hrmp::HrmpIngressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpAcceptedChannelRequestCount` (r:2 w:2) + /// Proof: `Hrmp::HrmpAcceptedChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn establish_channel_with_system() -> Weight { + // Proof Size summary in bytes: + // Measured: `477` + // Estimated: `6417` + // Minimum execution time: 129_253_000 picoseconds. + Weight::from_parts(134_005_000, 0) + .saturating_add(Weight::from_parts(0, 6417)) + .saturating_add(T::DbWeight::get().reads(23)) + .saturating_add(T::DbWeight::get().writes(11)) + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_parachains_inclusion.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_parachains_inclusion.rs new file mode 100644 index 0000000000000..dd2004d948849 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_parachains_inclusion.rs @@ -0,0 +1,138 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. 
+ +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `polkadot_runtime_parachains::inclusion` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/westend-runtime/westend_runtime.wasm +// --pallet=polkadot_runtime_parachains::inclusion +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `polkadot_runtime_parachains::inclusion`. 
+pub struct WeightInfo(PhantomData); +impl polkadot_runtime_parachains::inclusion::WeightInfo for WeightInfo { + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaSessionInfo::AccountKeys` (r:1 w:0) + /// Proof: `ParaSessionInfo::AccountKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Session::Validators` (r:1 w:0) + /// Proof: `Session::Validators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ActiveEra` (r:1 w:0) + /// Proof: `Staking::ActiveEra` (`max_values`: Some(1), `max_size`: Some(13), added: 508, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasRewardPoints` (r:1 w:1) + /// Proof: `Staking::ErasRewardPoints` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeHash` (r:1 w:1) + /// Proof: `Paras::FutureCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CurrentCodeHash` (r:1 w:0) + /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeCooldowns` (r:1 w:1) + /// Proof: `Paras::UpgradeCooldowns` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHash` (r:1 w:1) + /// Proof: `Paras::CodeByHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteList` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHashRefs` (r:1 w:1) + /// Proof: 
`Paras::CodeByHashRefs` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:1) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:2) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(131122), added: 133597, mode: `MaxEncodedLen`) + /// Storage: `Hrmp::HrmpChannelDigests` (r:3 w:3) + /// Proof: `Hrmp::HrmpChannelDigests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannels` (r:2 w:2) + /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannelContents` (r:2 w:2) + /// Proof: `Hrmp::HrmpChannelContents` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0) + /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) + /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) + /// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::MostRecentContext` (r:0 w:1) + /// 
Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeRestrictionSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeRestrictionSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) + /// Proof: UNKNOWN KEY `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) + /// The range of component `u` is `[0, 2]`. + /// The range of component `h` is `[0, 2]`. + /// The range of component `c` is `[0, 1]`. + fn enact_candidate(u: u32, h: u32, c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1447 + c * (15992 ±0) + h * (92 ±0) + u * (131259 ±0)` + // Estimated: `134587 + c * (25419 ±939) + h * (29985 ±511) + u * (82828 ±511)` + // Minimum execution time: 1_208_571_000 picoseconds. + Weight::from_parts(156_323_556, 0) + .saturating_add(Weight::from_parts(0, 134587)) + // Standard Error: 1_121_931 + .saturating_add(Weight::from_parts(529_418_705, 0).saturating_mul(u.into())) + // Standard Error: 1_121_931 + .saturating_add(Weight::from_parts(527_179_667, 0).saturating_mul(h.into())) + // Standard Error: 1_859_173 + .saturating_add(Weight::from_parts(45_652_413, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(u.into()))) + .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(h.into()))) + .saturating_add(T::DbWeight::get().reads((9_u64).saturating_mul(c.into()))) + .saturating_add(T::DbWeight::get().writes(6)) + .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(u.into()))) + .saturating_add(T::DbWeight::get().writes((4_u64).saturating_mul(h.into()))) + .saturating_add(T::DbWeight::get().writes((8_u64).saturating_mul(c.into()))) + .saturating_add(Weight::from_parts(0, 25419).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(0, 
29985).saturating_mul(h.into())) + .saturating_add(Weight::from_parts(0, 82828).saturating_mul(u.into())) + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_parachains_initializer.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_parachains_initializer.rs new file mode 100644 index 0000000000000..4882ee51831fb --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_parachains_initializer.rs @@ -0,0 +1,65 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `polkadot_runtime_parachains::initializer` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/westend-runtime/westend_runtime.wasm +// --pallet=polkadot_runtime_parachains::initializer +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `polkadot_runtime_parachains::initializer`. +pub struct WeightInfo(PhantomData); +impl polkadot_runtime_parachains::initializer::WeightInfo for WeightInfo { + /// The range of component `d` is `[0, 65536]`. + fn force_approve(d: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_139_000 picoseconds. + Weight::from_parts(1_915_360, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 13 + .saturating_add(Weight::from_parts(792, 0).saturating_mul(d.into())) + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_parachains_on_demand.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_parachains_on_demand.rs new file mode 100644 index 0000000000000..2b946ca777258 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_parachains_on_demand.rs @@ -0,0 +1,126 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. 
+ +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `polkadot_runtime_parachains::on_demand` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/westend-runtime/westend_runtime.wasm +// --pallet=polkadot_runtime_parachains::on_demand +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `polkadot_runtime_parachains::on_demand`. 
+pub struct WeightInfo(PhantomData); +impl polkadot_runtime_parachains::on_demand::WeightInfo for WeightInfo { + /// Storage: `OnDemandAssignmentProvider::QueueStatus` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::QueueStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `OnDemandAssignmentProvider::Revenue` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::Revenue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `OnDemandAssignmentProvider::ParaIdAffinity` (r:1 w:0) + /// Proof: `OnDemandAssignmentProvider::ParaIdAffinity` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `OnDemandAssignmentProvider::FreeEntries` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::FreeEntries` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// The range of component `s` is `[1, 9999]`. + fn place_order_keep_alive(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `270 + s * (8 ±0)` + // Estimated: `3733 + s * (8 ±0)` + // Minimum execution time: 49_279_000 picoseconds. 
+ Weight::from_parts(33_507_231, 0) + .saturating_add(Weight::from_parts(0, 3733)) + // Standard Error: 195 + .saturating_add(Weight::from_parts(30_042, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(Weight::from_parts(0, 8).saturating_mul(s.into())) + } + /// Storage: `OnDemandAssignmentProvider::QueueStatus` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::QueueStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `OnDemandAssignmentProvider::Revenue` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::Revenue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `OnDemandAssignmentProvider::ParaIdAffinity` (r:1 w:0) + /// Proof: `OnDemandAssignmentProvider::ParaIdAffinity` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `OnDemandAssignmentProvider::FreeEntries` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::FreeEntries` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// The range of component `s` is `[1, 9999]`. + fn place_order_allow_death(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `270 + s * (8 ±0)` + // Estimated: `3733 + s * (8 ±0)` + // Minimum execution time: 49_928_000 picoseconds. 
+ Weight::from_parts(37_696_286, 0) + .saturating_add(Weight::from_parts(0, 3733)) + // Standard Error: 202 + .saturating_add(Weight::from_parts(28_593, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(Weight::from_parts(0, 8).saturating_mul(s.into())) + } + /// Storage: `OnDemandAssignmentProvider::QueueStatus` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::QueueStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `OnDemandAssignmentProvider::Credits` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::Credits` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `OnDemandAssignmentProvider::Revenue` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::Revenue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `OnDemandAssignmentProvider::ParaIdAffinity` (r:1 w:0) + /// Proof: `OnDemandAssignmentProvider::ParaIdAffinity` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `OnDemandAssignmentProvider::FreeEntries` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::FreeEntries` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// The range of component `s` is `[1, 9999]`. + fn place_order_with_credits(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `304 + s * (8 ±0)` + // Estimated: `3767 + s * (8 ±0)` + // Minimum execution time: 28_187_000 picoseconds. 
+ Weight::from_parts(12_021_424, 0) + .saturating_add(Weight::from_parts(0, 3767)) + // Standard Error: 157 + .saturating_add(Weight::from_parts(27_490, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(Weight::from_parts(0, 8).saturating_mul(s.into())) + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_parachains_paras.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_parachains_paras.rs new file mode 100644 index 0000000000000..dd439db8d303e --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_parachains_paras.rs @@ -0,0 +1,295 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `polkadot_runtime_parachains::paras` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/westend-runtime/westend_runtime.wasm +// --pallet=polkadot_runtime_parachains::paras +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `polkadot_runtime_parachains::paras`. +pub struct WeightInfo(PhantomData); +impl polkadot_runtime_parachains::paras::WeightInfo for WeightInfo { + /// Storage: `Paras::CodeByHashRefs` (r:1 w:1) + /// Proof: `Paras::CodeByHashRefs` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CurrentCodeHash` (r:1 w:1) + /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PastCodeMeta` (r:1 w:1) + /// Proof: `Paras::PastCodeMeta` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PastCodePruning` (r:1 w:1) + /// Proof: `Paras::PastCodePruning` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PastCodeHash` (r:0 w:1) + /// Proof: `Paras::PastCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHash` (r:0 w:1) + /// Proof: `Paras::CodeByHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `c` is `[9, 3145728]`. 
+ fn force_set_current_code(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `8309` + // Estimated: `11774` + // Minimum execution time: 38_573_000 picoseconds. + Weight::from_parts(39_400_000, 0) + .saturating_add(Weight::from_parts(0, 11774)) + // Standard Error: 128 + .saturating_add(Weight::from_parts(13_197, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(6)) + } + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `s` is `[9, 1048576]`. + fn force_set_current_head(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_110_000 picoseconds. + Weight::from_parts(7_328_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 41 + .saturating_add(Weight::from_parts(4_535, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Paras::MostRecentContext` (r:0 w:1) + /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn force_set_most_recent_context() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_564_000 picoseconds. 
+ Weight::from_parts(3_677_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Paras::FutureCodeHash` (r:1 w:1) + /// Proof: `Paras::FutureCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CurrentCodeHash` (r:1 w:0) + /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeCooldowns` (r:1 w:1) + /// Proof: `Paras::UpgradeCooldowns` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHash` (r:1 w:1) + /// Proof: `Paras::CodeByHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteList` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHashRefs` (r:1 w:1) + /// Proof: `Paras::CodeByHashRefs` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeRestrictionSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeRestrictionSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `c` is `[9, 3145728]`. + fn force_schedule_code_upgrade(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `8452` + // Estimated: `11917` + // Minimum execution time: 52_408_000 picoseconds. 
+ Weight::from_parts(53_467_000, 0) + .saturating_add(Weight::from_parts(0, 11917)) + // Standard Error: 129 + .saturating_add(Weight::from_parts(13_193, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(7)) + } + /// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0) + /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::MostRecentContext` (r:0 w:1) + /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `s` is `[9, 1048576]`. + fn force_note_new_head(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `268` + // Estimated: `3733` + // Minimum execution time: 14_751_000 picoseconds. + Weight::from_parts(15_033_000, 0) + .saturating_add(Weight::from_parts(0, 3733)) + // Standard Error: 41 + .saturating_add(Weight::from_parts(4_536, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ActionsQueue` (r:1 w:1) + /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn force_queue_action() -> Weight { + // Proof Size summary in bytes: + // Measured: `4312` + // Estimated: `7777` + // Minimum execution time: 24_624_000 picoseconds. 
+ Weight::from_parts(25_772_000, 0) + .saturating_add(Weight::from_parts(0, 7777)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteList` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ActionsQueue` (r:1 w:1) + /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `c` is `[9, 3145728]`. + fn add_trusted_validation_code(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `683` + // Estimated: `4148` + // Minimum execution time: 89_347_000 picoseconds. + Weight::from_parts(91_876_000, 0) + .saturating_add(Weight::from_parts(0, 4148)) + // Standard Error: 127 + .saturating_add(Weight::from_parts(12_741, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Paras::CodeByHashRefs` (r:1 w:0) + /// Proof: `Paras::CodeByHashRefs` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CodeByHash` (r:0 w:1) + /// Proof: `Paras::CodeByHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn poke_unused_validation_code() -> Weight { + // Proof Size summary in bytes: + // Measured: `28` + // Estimated: `3493` + // Minimum execution time: 6_736_000 picoseconds. 
+ Weight::from_parts(7_018_000, 0) + .saturating_add(Weight::from_parts(0, 3493)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn include_pvf_check_statement() -> Weight { + // Proof Size summary in bytes: + // Measured: `26706` + // Estimated: `30171` + // Minimum execution time: 112_868_000 picoseconds. + Weight::from_parts(115_588_000, 0) + .saturating_add(Weight::from_parts(0, 30171)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteList` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpcomingUpgrades` (r:1 w:1) + /// Proof: `Paras::UpcomingUpgrades` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeUpgrades` (r:0 w:100) + /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn 
include_pvf_check_statement_finalize_upgrade_accept() -> Weight { + // Proof Size summary in bytes: + // Measured: `27360` + // Estimated: `30825` + // Minimum execution time: 773_659_000 picoseconds. + Weight::from_parts(794_204_000, 0) + .saturating_add(Weight::from_parts(0, 30825)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(103)) + } + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn include_pvf_check_statement_finalize_upgrade_reject() -> Weight { + // Proof Size summary in bytes: + // Measured: `27338` + // Estimated: `30803` + // Minimum execution time: 108_485_000 picoseconds. 
+ Weight::from_parts(113_249_000, 0) + .saturating_add(Weight::from_parts(0, 30803)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteList` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ActionsQueue` (r:1 w:1) + /// Proof: `Paras::ActionsQueue` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn include_pvf_check_statement_finalize_onboarding_accept() -> Weight { + // Proof Size summary in bytes: + // Measured: `26728` + // Estimated: `30193` + // Minimum execution time: 635_656_000 picoseconds. 
+ Weight::from_parts(643_507_000, 0) + .saturating_add(Weight::from_parts(0, 30193)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::PvfActiveVoteMap` (r:1 w:1) + /// Proof: `Paras::PvfActiveVoteMap` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn include_pvf_check_statement_finalize_onboarding_reject() -> Weight { + // Proof Size summary in bytes: + // Measured: `26706` + // Estimated: `30171` + // Minimum execution time: 103_358_000 picoseconds. + Weight::from_parts(107_759_000, 0) + .saturating_add(Weight::from_parts(0, 30171)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_parachains_paras_inherent.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_parachains_paras_inherent.rs new file mode 100644 index 0000000000000..f9ad4a3e2c9cc --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/polkadot_runtime_parachains_paras_inherent.rs @@ -0,0 +1,370 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `polkadot_runtime_parachains::paras_inherent` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/westend-runtime/westend_runtime.wasm +// --pallet=polkadot_runtime_parachains::paras_inherent +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `polkadot_runtime_parachains::paras_inherent`. 
+pub struct WeightInfo(PhantomData); +impl polkadot_runtime_parachains::paras_inherent::WeightInfo for WeightInfo { + /// Storage: `ParaInherent::Included` (r:1 w:1) + /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::ParentHash` (r:1 w:0) + /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) + /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:0) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) + /// Proof: `Babe::AuthorVrfRandomness` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `ParaInherent::OnChainVotes` (r:1 w:1) + /// Proof: `ParaInherent::OnChainVotes` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Frozen` (r:1 w:0) + /// Proof: `ParasDisputes::Frozen` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaInclusion::V1` (r:1 w:0) + /// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) + /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), 
`max_size`: None, mode: `Measured`) + /// Storage: `Initializer::BufferedSessionChanges` (r:1 w:0) + /// Proof: `Initializer::BufferedSessionChanges` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Session::DisabledValidators` (r:1 w:0) + /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn enter_empty() -> Weight { + // Proof Size summary in bytes: + // Measured: `37558` + // Estimated: `41023` + // Minimum execution time: 204_765_000 picoseconds. + Weight::from_parts(218_574_000, 0) + .saturating_add(Weight::from_parts(0, 41023)) + .saturating_add(T::DbWeight::get().reads(15)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `ParaInherent::Included` (r:1 w:1) + /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::ParentHash` (r:1 w:0) + /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) + /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: 
None, mode: `Measured`) + /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) + /// Proof: `Babe::AuthorVrfRandomness` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `ParaSessionInfo::Sessions` (r:1 w:0) + /// Proof: `ParaSessionInfo::Sessions` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Disputes` (r:1 w:1) + /// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::BackersOnDisputes` (r:1 w:1) + /// Proof: `ParasDisputes::BackersOnDisputes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Included` (r:1 w:1) + /// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaSessionInfo::AccountKeys` (r:1 w:0) + /// Proof: `ParaSessionInfo::AccountKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Session::Validators` (r:1 w:0) + /// Proof: `Session::Validators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ActiveEra` (r:1 w:0) + /// Proof: `Staking::ActiveEra` (`max_values`: Some(1), `max_size`: Some(13), added: 508, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasRewardPoints` (r:1 w:1) + /// Proof: `Staking::ErasRewardPoints` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaInherent::OnChainVotes` (r:1 w:1) + /// Proof: `ParaInherent::OnChainVotes` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Frozen` (r:1 w:0) + /// Proof: `ParasDisputes::Frozen` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaInclusion::V1` (r:2 w:1) + /// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: 
`Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:1) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannelDigests` (r:1 w:1) + /// Proof: `Hrmp::HrmpChannelDigests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0) + /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) + /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Initializer::BufferedSessionChanges` (r:1 w:0) + /// Proof: `Initializer::BufferedSessionChanges` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Session::DisabledValidators` (r:1 w:0) + /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) + /// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::MostRecentContext` (r:0 w:1) + /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `v` is `[400, 1024]`. 
+ fn enter_variable_disputes(v: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `117466` + // Estimated: `123406` + // Minimum execution time: 18_505_556_000 picoseconds. + Weight::from_parts(680_113_149, 0) + .saturating_add(Weight::from_parts(0, 123406)) + // Standard Error: 16_387 + .saturating_add(Weight::from_parts(44_859_126, 0).saturating_mul(v.into())) + .saturating_add(T::DbWeight::get().reads(29)) + .saturating_add(T::DbWeight::get().writes(17)) + } + /// Storage: `ParaInherent::Included` (r:1 w:1) + /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::ParentHash` (r:1 w:0) + /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) + /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:0) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) + /// Proof: `Babe::AuthorVrfRandomness` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `ParaInherent::OnChainVotes` (r:1 w:1) + /// Proof: `ParaInherent::OnChainVotes` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Frozen` (r:1 w:0) + 
/// Proof: `ParasDisputes::Frozen` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaInclusion::V1` (r:2 w:1) + /// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) + /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Initializer::BufferedSessionChanges` (r:1 w:0) + /// Proof: `Initializer::BufferedSessionChanges` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Session::DisabledValidators` (r:1 w:0) + /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn enter_bitfields() -> Weight { + // Proof Size summary in bytes: + // Measured: `74966` + // Estimated: `80906` + // Minimum execution time: 462_150_000 picoseconds. 
+ Weight::from_parts(479_816_000, 0) + .saturating_add(Weight::from_parts(0, 80906)) + .saturating_add(T::DbWeight::get().reads(16)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `ParaInherent::Included` (r:1 w:1) + /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::ParentHash` (r:1 w:0) + /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) + /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) + /// Proof: `Babe::AuthorVrfRandomness` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `ParaInherent::OnChainVotes` (r:1 w:1) + /// Proof: `ParaInherent::OnChainVotes` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Frozen` (r:1 w:0) + /// Proof: `ParasDisputes::Frozen` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaInclusion::V1` (r:2 w:1) + /// Proof: `ParaInclusion::V1` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaSessionInfo::AccountKeys` (r:1 w:0) + /// Proof: 
`ParaSessionInfo::AccountKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Session::Validators` (r:1 w:0) + /// Proof: `Session::Validators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ActiveEra` (r:1 w:0) + /// Proof: `Staking::ActiveEra` (`max_values`: Some(1), `max_size`: Some(13), added: 508, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasRewardPoints` (r:1 w:1) + /// Proof: `Staking::ErasRewardPoints` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:1) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannelDigests` (r:1 w:1) + /// Proof: `Hrmp::HrmpChannelDigests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0) + /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Disputes` (r:1 w:0) + /// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) + /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Initializer::BufferedSessionChanges` (r:1 w:0) + /// Proof: `Initializer::BufferedSessionChanges` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CurrentCodeHash` (r:1 w:0) + /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: 
`MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Session::DisabledValidators` (r:1 w:0) + /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Included` (r:0 w:1) + /// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) + /// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::MostRecentContext` (r:0 w:1) + /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `v` is `[2, 5]`. + fn enter_backed_candidates_variable(v: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `76410` + // Estimated: `82350` + // Minimum execution time: 1_488_610_000 picoseconds. 
+ Weight::from_parts(1_450_591_151, 0) + .saturating_add(Weight::from_parts(0, 82350)) + // Standard Error: 393_462 + .saturating_add(Weight::from_parts(44_344_438, 0).saturating_mul(v.into())) + .saturating_add(T::DbWeight::get().reads(29)) + .saturating_add(T::DbWeight::get().writes(15)) + } + /// Storage: `ParaInherent::Included` (r:1 w:1) + /// Proof: `ParaInherent::Included` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::ParentHash` (r:1 w:0) + /// Proof: `System::ParentHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::AllowedRelayParents` (r:1 w:1) + /// Proof: `ParasShared::AllowedRelayParents` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ClaimQueue` (r:1 w:1) + /// Proof: `ParaScheduler::ClaimQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::ValidatorGroups` (r:1 w:0) + /// Proof: `ParaScheduler::ValidatorGroups` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::ActiveValidatorKeys` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorKeys` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Babe::AuthorVrfRandomness` (r:1 w:0) + /// Proof: `Babe::AuthorVrfRandomness` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `ParaInherent::OnChainVotes` (r:1 w:1) + /// Proof: `ParaInherent::OnChainVotes` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Frozen` (r:1 w:0) + /// Proof: `ParasDisputes::Frozen` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParaInclusion::V1` (r:2 w:1) + /// Proof: `ParaInclusion::V1` (`max_values`: 
None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaSessionInfo::AccountKeys` (r:1 w:0) + /// Proof: `ParaSessionInfo::AccountKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Session::Validators` (r:1 w:0) + /// Proof: `Session::Validators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ActiveEra` (r:1 w:0) + /// Proof: `Staking::ActiveEra` (`max_values`: Some(1), `max_size`: Some(13), added: 508, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasRewardPoints` (r:1 w:1) + /// Proof: `Staking::ErasRewardPoints` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:1) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpChannelDigests` (r:1 w:1) + /// Proof: `Hrmp::HrmpChannelDigests` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeUpgrades` (r:1 w:0) + /// Proof: `Paras::FutureCodeUpgrades` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Disputes` (r:1 w:0) + /// Proof: `ParasDisputes::Disputes` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParaScheduler::SessionStartBlock` (r:1 w:0) + /// Proof: `ParaScheduler::SessionStartBlock` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Initializer::BufferedSessionChanges` (r:1 w:0) + /// Proof: `Initializer::BufferedSessionChanges` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Paras::CurrentCodeHash` (r:1 w:0) + /// Proof: `Paras::CurrentCodeHash` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::FutureCodeHash` (r:1 w:0) + /// Proof: `Paras::FutureCodeHash` (`max_values`: None, 
`max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeRestrictionSignal` (r:1 w:0) + /// Proof: `Paras::UpgradeRestrictionSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::ParaLifecycles` (r:1 w:0) + /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `ParasShared::ActiveValidatorIndices` (r:1 w:0) + /// Proof: `ParasShared::ActiveValidatorIndices` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Session::DisabledValidators` (r:1 w:0) + /// Proof: `Session::DisabledValidators` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) + /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParasDisputes::Included` (r:0 w:1) + /// Proof: `ParasDisputes::Included` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Hrmp::HrmpWatermarks` (r:0 w:1) + /// Proof: `Hrmp::HrmpWatermarks` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:0 w:1) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::UpgradeGoAheadSignal` (r:0 w:1) + /// Proof: `Paras::UpgradeGoAheadSignal` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::MostRecentContext` (r:0 w:1) + /// Proof: `Paras::MostRecentContext` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn enter_backed_candidate_code_upgrade() -> Weight { + // Proof Size summary in bytes: + // Measured: `76423` + // Estimated: `82363` + // Minimum execution time: 83_641_026_000 picoseconds. 
+ Weight::from_parts(84_843_754_000, 0) + .saturating_add(Weight::from_parts(0, 82363)) + .saturating_add(T::DbWeight::get().reads(31)) + .saturating_add(T::DbWeight::get().writes(15)) + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/xcm/mod.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/xcm/mod.rs new file mode 100644 index 0000000000000..ddba1294a8be9 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/xcm/mod.rs @@ -0,0 +1,319 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +mod pallet_xcm_benchmarks_fungible; +mod pallet_xcm_benchmarks_generic; + +use crate::Runtime; +use alloc::vec::Vec; +use frame_support::weights::Weight; +use xcm::{ + latest::{prelude::*, QueryResponseInfo}, + DoubleEncoded, +}; + +use pallet_xcm_benchmarks_fungible::WeightInfo as XcmBalancesWeight; +use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric; +use sp_runtime::BoundedVec; +use xcm::latest::AssetTransferFilter; + +/// Types of asset supported by the westend runtime. +pub enum AssetTypes { + /// An asset backed by `pallet-balances`. + Balances, + /// Unknown asset. + Unknown, +} + +impl From<&Asset> for AssetTypes { + fn from(asset: &Asset) -> Self { + match asset { + Asset { id: AssetId(Location { parents: 0, interior: Here }), .. 
} => + AssetTypes::Balances, + _ => AssetTypes::Unknown, + } + } +} + +trait WeighAssets { + fn weigh_assets(&self, balances_weight: Weight) -> Weight; +} + +// Westend only knows about one asset, the balances pallet. +const MAX_ASSETS: u64 = 1; + +impl WeighAssets for AssetFilter { + fn weigh_assets(&self, balances_weight: Weight) -> Weight { + match self { + Self::Definite(assets) => assets + .inner() + .into_iter() + .map(From::from) + .map(|t| match t { + AssetTypes::Balances => balances_weight, + AssetTypes::Unknown => Weight::MAX, + }) + .fold(Weight::zero(), |acc, x| acc.saturating_add(x)), + // We don't support any NFTs on Westend, so these two variants will always match + // only 1 kind of fungible asset. + Self::Wild(AllOf { .. } | AllOfCounted { .. }) => balances_weight, + Self::Wild(AllCounted(count)) => + balances_weight.saturating_mul(MAX_ASSETS.min(*count as u64)), + Self::Wild(All) => balances_weight.saturating_mul(MAX_ASSETS), + } + } +} + +impl WeighAssets for Assets { + fn weigh_assets(&self, balances_weight: Weight) -> Weight { + self.inner() + .into_iter() + .map(|m| >::from(m)) + .map(|t| match t { + AssetTypes::Balances => balances_weight, + AssetTypes::Unknown => Weight::MAX, + }) + .fold(Weight::zero(), |acc, x| acc.saturating_add(x)) + } +} + +pub struct WestendXcmWeight(core::marker::PhantomData); +impl XcmWeightInfo for WestendXcmWeight { + fn withdraw_asset(assets: &Assets) -> Weight { + assets.weigh_assets(XcmBalancesWeight::::withdraw_asset()) + } + fn reserve_asset_deposited(assets: &Assets) -> Weight { + assets.weigh_assets(XcmBalancesWeight::::reserve_asset_deposited()) + } + fn receive_teleported_asset(assets: &Assets) -> Weight { + assets.weigh_assets(XcmBalancesWeight::::receive_teleported_asset()) + } + fn query_response( + _query_id: &u64, + _response: &Response, + _max_weight: &Weight, + _querier: &Option, + ) -> Weight { + XcmGeneric::::query_response() + } + fn transfer_asset(assets: &Assets, _dest: &Location) -> Weight { + 
assets.weigh_assets(XcmBalancesWeight::::transfer_asset()) + } + fn transfer_reserve_asset(assets: &Assets, _dest: &Location, _xcm: &Xcm<()>) -> Weight { + assets.weigh_assets(XcmBalancesWeight::::transfer_reserve_asset()) + } + fn transact( + _origin_kind: &OriginKind, + _fallback_max_weight: &Option, + _call: &DoubleEncoded, + ) -> Weight { + XcmGeneric::::transact() + } + fn hrmp_new_channel_open_request( + _sender: &u32, + _max_message_size: &u32, + _max_capacity: &u32, + ) -> Weight { + // XCM Executor does not currently support HRMP channel operations + Weight::MAX + } + fn hrmp_channel_accepted(_recipient: &u32) -> Weight { + // XCM Executor does not currently support HRMP channel operations + Weight::MAX + } + fn hrmp_channel_closing(_initiator: &u32, _sender: &u32, _recipient: &u32) -> Weight { + // XCM Executor does not currently support HRMP channel operations + Weight::MAX + } + fn clear_origin() -> Weight { + XcmGeneric::::clear_origin() + } + fn descend_origin(_who: &InteriorLocation) -> Weight { + XcmGeneric::::descend_origin() + } + fn report_error(_query_response_info: &QueryResponseInfo) -> Weight { + XcmGeneric::::report_error() + } + + fn deposit_asset(assets: &AssetFilter, _dest: &Location) -> Weight { + assets.weigh_assets(XcmBalancesWeight::::deposit_asset()) + } + fn deposit_reserve_asset(assets: &AssetFilter, _dest: &Location, _xcm: &Xcm<()>) -> Weight { + assets.weigh_assets(XcmBalancesWeight::::deposit_reserve_asset()) + } + fn exchange_asset(_give: &AssetFilter, _receive: &Assets, _maximal: &bool) -> Weight { + // Westend does not currently support exchange asset operations + Weight::MAX + } + fn initiate_reserve_withdraw( + assets: &AssetFilter, + _reserve: &Location, + _xcm: &Xcm<()>, + ) -> Weight { + assets.weigh_assets(XcmBalancesWeight::::initiate_reserve_withdraw()) + } + fn initiate_teleport(assets: &AssetFilter, _dest: &Location, _xcm: &Xcm<()>) -> Weight { + assets.weigh_assets(XcmBalancesWeight::::initiate_teleport()) + } + fn 
initiate_transfer( + _dest: &Location, + remote_fees: &Option, + _preserve_origin: &bool, + assets: &BoundedVec, + _xcm: &Xcm<()>, + ) -> Weight { + let base_weight = XcmBalancesWeight::::initiate_transfer(); + let mut weight = if let Some(remote_fees) = remote_fees { + let fees = remote_fees.inner(); + fees.weigh_assets(base_weight) + } else { + base_weight + }; + for asset_filter in assets { + let assets = asset_filter.inner(); + let extra = assets.weigh_assets(XcmBalancesWeight::::initiate_transfer()); + weight = weight.saturating_add(extra); + } + weight + } + fn report_holding(_response_info: &QueryResponseInfo, _assets: &AssetFilter) -> Weight { + XcmGeneric::::report_holding() + } + fn buy_execution(_fees: &Asset, _weight_limit: &WeightLimit) -> Weight { + XcmGeneric::::buy_execution() + } + fn pay_fees(_asset: &Asset) -> Weight { + XcmGeneric::::pay_fees() + } + fn refund_surplus() -> Weight { + XcmGeneric::::refund_surplus() + } + fn set_error_handler(_xcm: &Xcm) -> Weight { + XcmGeneric::::set_error_handler() + } + fn set_appendix(_xcm: &Xcm) -> Weight { + XcmGeneric::::set_appendix() + } + fn clear_error() -> Weight { + XcmGeneric::::clear_error() + } + fn set_hints(hints: &BoundedVec) -> Weight { + let mut weight = Weight::zero(); + for hint in hints { + match hint { + AssetClaimer { .. 
} => { + weight = weight.saturating_add(XcmGeneric::::asset_claimer()); + }, + } + } + weight + } + fn claim_asset(_assets: &Assets, _ticket: &Location) -> Weight { + XcmGeneric::::claim_asset() + } + fn trap(_code: &u64) -> Weight { + XcmGeneric::::trap() + } + fn subscribe_version(_query_id: &QueryId, _max_response_weight: &Weight) -> Weight { + XcmGeneric::::subscribe_version() + } + fn unsubscribe_version() -> Weight { + XcmGeneric::::unsubscribe_version() + } + fn burn_asset(assets: &Assets) -> Weight { + assets.weigh_assets(XcmGeneric::::burn_asset()) + } + fn expect_asset(assets: &Assets) -> Weight { + assets.weigh_assets(XcmGeneric::::expect_asset()) + } + fn expect_origin(_origin: &Option) -> Weight { + XcmGeneric::::expect_origin() + } + fn expect_error(_error: &Option<(u32, XcmError)>) -> Weight { + XcmGeneric::::expect_error() + } + fn expect_transact_status(_transact_status: &MaybeErrorCode) -> Weight { + XcmGeneric::::expect_transact_status() + } + fn query_pallet(_module_name: &Vec, _response_info: &QueryResponseInfo) -> Weight { + XcmGeneric::::query_pallet() + } + fn expect_pallet( + _index: &u32, + _name: &Vec, + _module_name: &Vec, + _crate_major: &u32, + _min_crate_minor: &u32, + ) -> Weight { + XcmGeneric::::expect_pallet() + } + fn report_transact_status(_response_info: &QueryResponseInfo) -> Weight { + XcmGeneric::::report_transact_status() + } + fn clear_transact_status() -> Weight { + XcmGeneric::::clear_transact_status() + } + fn universal_origin(_: &Junction) -> Weight { + // Westend does not currently support universal origin operations + Weight::MAX + } + fn export_message(_: &NetworkId, _: &Junctions, _: &Xcm<()>) -> Weight { + // Westend relay should not support export message operations + Weight::MAX + } + fn lock_asset(_: &Asset, _: &Location) -> Weight { + // Westend does not currently support asset locking operations + Weight::MAX + } + fn unlock_asset(_: &Asset, _: &Location) -> Weight { + // Westend does not currently support 
asset locking operations + Weight::MAX + } + fn note_unlockable(_: &Asset, _: &Location) -> Weight { + // Westend does not currently support asset locking operations + Weight::MAX + } + fn request_unlock(_: &Asset, _: &Location) -> Weight { + // Westend does not currently support asset locking operations + Weight::MAX + } + fn set_fees_mode(_: &bool) -> Weight { + XcmGeneric::::set_fees_mode() + } + fn set_topic(_topic: &[u8; 32]) -> Weight { + XcmGeneric::::set_topic() + } + fn clear_topic() -> Weight { + XcmGeneric::::clear_topic() + } + fn alias_origin(_: &Location) -> Weight { + XcmGeneric::::alias_origin() + } + fn unpaid_execution(_: &WeightLimit, _: &Option) -> Weight { + XcmGeneric::::unpaid_execution() + } + fn execute_with_origin(_: &Option, _: &Xcm) -> Weight { + XcmGeneric::::execute_with_origin() + } +} + +#[test] +fn all_counted_has_a_sane_weight_upper_limit() { + let assets = AssetFilter::Wild(AllCounted(4294967295)); + let weight = Weight::from_parts(1000, 1000); + + assert_eq!(assets.weigh_assets(weight), weight * MAX_ASSETS); +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs new file mode 100644 index 0000000000000..1aa864522a489 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs @@ -0,0 +1,217 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Autogenerated weights for `pallet_xcm_benchmarks::fungible` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/westend-runtime/westend_runtime.wasm +// --pallet=pallet_xcm_benchmarks::fungible +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights/xcm +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --template=polkadot/xcm/pallet-xcm-benchmarks/template.hbs +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_xcm_benchmarks::fungible`. 
+pub struct WeightInfo(PhantomData); +impl WeightInfo { + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + pub(crate) fn withdraw_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `101` + // Estimated: `3593` + // Minimum execution time: 32_959_000 picoseconds. + Weight::from_parts(34_026_000, 3593) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + pub(crate) fn transfer_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `101` + // Estimated: `6196` + // Minimum execution time: 45_561_000 picoseconds. + Weight::from_parts(47_174_000, 6196) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `System::Account` (r:3 w:3) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:1 w:0) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + pub(crate) fn transfer_reserve_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `416` + // 
Estimated: `8799` + // Minimum execution time: 122_629_000 picoseconds. + Weight::from_parts(128_496_000, 8799) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + pub(crate) fn reserve_asset_deposited() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + } + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:1 w:0) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + pub(crate) fn initiate_reserve_withdraw() -> Weight { + // Proof Size summary in bytes: + // Measured: `416` + // Estimated: `6196` + // Minimum execution time: 87_970_000 picoseconds. 
+ Weight::from_parts(90_204_000, 6196) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + pub(crate) fn receive_teleported_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `3593` + // Minimum execution time: 33_369_000 picoseconds. + Weight::from_parts(34_221_000, 3593) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + pub(crate) fn deposit_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `3593` + // Minimum execution time: 25_481_000 picoseconds. + Weight::from_parts(26_034_000, 3593) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:1 w:0) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + pub(crate) fn 
deposit_reserve_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `212` + // Estimated: `3677` + // Minimum execution time: 78_649_000 picoseconds. + Weight::from_parts(81_856_000, 3677) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:1 w:0) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + pub(crate) fn initiate_teleport() -> Weight { + // Proof Size summary in bytes: + // Measured: `212` + // Estimated: `3677` + // Minimum execution time: 80_300_000 picoseconds. 
+ Weight::from_parts(82_448_000, 3677) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:1 w:0) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + pub(crate) fn initiate_transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `315` + // Estimated: `6196` + // Minimum execution time: 100_996_000 picoseconds. + Weight::from_parts(103_473_000, 6196) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(4)) + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/substrate/frame/staking-async/runtimes/rc/src/weights/xcm/pallet_xcm_benchmarks_generic.rs new file mode 100644 index 0000000000000..1650928bc9a50 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -0,0 +1,360 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_xcm_benchmarks::generic` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `3a2e9ae8a8f5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/westend-runtime/westend_runtime.wasm +// --pallet=pallet_xcm_benchmarks::generic +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights/xcm +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --template=polkadot/xcm/pallet-xcm-benchmarks/template.hbs +// --no-storage-info +// --no-min-squares +// --no-median-slopes + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_xcm_benchmarks::generic`. 
+pub struct WeightInfo(PhantomData); +impl WeightInfo { + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:1 w:0) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + pub(crate) fn report_holding() -> Weight { + // Proof Size summary in bytes: + // Measured: `416` + // Estimated: `6196` + // Minimum execution time: 85_049_000 picoseconds. + Weight::from_parts(86_990_000, 6196) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(4)) + } + pub(crate) fn buy_execution() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 765_000 picoseconds. + Weight::from_parts(843_000, 0) + } + pub(crate) fn pay_fees() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_417_000 picoseconds. + Weight::from_parts(3_570_000, 0) + } + pub(crate) fn asset_claimer() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 758_000 picoseconds. 
+ Weight::from_parts(827_000, 0) + } + /// Storage: `XcmPallet::Queries` (r:1 w:0) + /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + pub(crate) fn query_response() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `3465` + // Minimum execution time: 6_326_000 picoseconds. + Weight::from_parts(6_588_000, 3465) + .saturating_add(T::DbWeight::get().reads(1)) + } + pub(crate) fn transact() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_872_000 picoseconds. + Weight::from_parts(8_163_000, 0) + } + pub(crate) fn refund_surplus() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_361_000 picoseconds. + Weight::from_parts(1_477_000, 0) + } + pub(crate) fn set_error_handler() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 723_000 picoseconds. + Weight::from_parts(827_000, 0) + } + pub(crate) fn set_appendix() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 752_000 picoseconds. + Weight::from_parts(817_000, 0) + } + pub(crate) fn clear_error() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 729_000 picoseconds. + Weight::from_parts(774_000, 0) + } + pub(crate) fn descend_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 784_000 picoseconds. + Weight::from_parts(834_000, 0) + } + pub(crate) fn execute_with_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 836_000 picoseconds. 
+ Weight::from_parts(907_000, 0) + } + pub(crate) fn clear_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 753_000 picoseconds. + Weight::from_parts(808_000, 0) + } + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:1 w:0) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + pub(crate) fn report_error() -> Weight { + // Proof Size summary in bytes: + // Measured: `416` + // Estimated: `6196` + // Minimum execution time: 82_051_000 picoseconds. + Weight::from_parts(84_513_000, 6196) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `XcmPallet::AssetTraps` (r:1 w:1) + /// Proof: `XcmPallet::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) + pub(crate) fn claim_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `23` + // Estimated: `3488` + // Minimum execution time: 9_753_000 picoseconds. 
+ Weight::from_parts(10_101_000, 3488) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + pub(crate) fn trap() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 762_000 picoseconds. + Weight::from_parts(825_000, 0) + } + /// Storage: `XcmPallet::VersionNotifyTargets` (r:1 w:1) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:1 w:0) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + pub(crate) fn subscribe_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `212` + // Estimated: `3677` + // Minimum execution time: 42_745_000 picoseconds. + Weight::from_parts(44_123_000, 3677) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `XcmPallet::VersionNotifyTargets` (r:0 w:1) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + pub(crate) fn unsubscribe_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_198_000 picoseconds. 
+ Weight::from_parts(3_356_000, 0) + .saturating_add(T::DbWeight::get().writes(1)) + } + pub(crate) fn burn_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_122_000 picoseconds. + Weight::from_parts(1_255_000, 0) + } + pub(crate) fn expect_asset() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 865_000 picoseconds. + Weight::from_parts(911_000, 0) + } + pub(crate) fn expect_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 748_000 picoseconds. + Weight::from_parts(815_000, 0) + } + pub(crate) fn expect_error() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 724_000 picoseconds. + Weight::from_parts(783_000, 0) + } + pub(crate) fn expect_transact_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 885_000 picoseconds. 
+ Weight::from_parts(973_000, 0) + } + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:1 w:0) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + pub(crate) fn query_pallet() -> Weight { + // Proof Size summary in bytes: + // Measured: `416` + // Estimated: `6196` + // Minimum execution time: 92_780_000 picoseconds. + Weight::from_parts(94_883_000, 6196) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(4)) + } + pub(crate) fn expect_pallet() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 8_284_000 picoseconds. 
+ Weight::from_parts(8_505_000, 0) + } + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Paras::Heads` (r:1 w:0) + /// Proof: `Paras::Heads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + pub(crate) fn report_transact_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `416` + // Estimated: `6196` + // Minimum execution time: 82_598_000 picoseconds. + Weight::from_parts(86_748_000, 6196) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(4)) + } + pub(crate) fn clear_transact_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 801_000 picoseconds. + Weight::from_parts(853_000, 0) + } + pub(crate) fn set_topic() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 703_000 picoseconds. + Weight::from_parts(782_000, 0) + } + pub(crate) fn clear_topic() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 674_000 picoseconds. 
+ Weight::from_parts(759_000, 0) + } + pub(crate) fn set_fees_mode() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 712_000 picoseconds. + Weight::from_parts(793_000, 0) + } + pub(crate) fn unpaid_execution() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 745_000 picoseconds. + Weight::from_parts(812_000, 0) + } + pub(crate) fn alias_origin() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 757_000 picoseconds. + Weight::from_parts(799_000, 0) + } +} diff --git a/substrate/frame/staking-async/runtimes/rc/src/xcm_config.rs b/substrate/frame/staking-async/runtimes/rc/src/xcm_config.rs new file mode 100644 index 0000000000000..8f53e88a45396 --- /dev/null +++ b/substrate/frame/staking-async/runtimes/rc/src/xcm_config.rs @@ -0,0 +1,326 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! XCM configurations for Westend. 
+ +use super::{ + parachains_origin, AccountId, AllPalletsWithSystem, Balances, Dmp, FellowshipAdmin, + GeneralAdmin, ParaId, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, StakingAdmin, + TransactionByteFee, Treasury, WeightToFee, XcmPallet, +}; +use crate::{governance::pallet_custom_origins::Treasurer, Balance, RuntimeHoldReason}; +use frame_support::{ + parameter_types, + traits::{ + fungible::HoldConsideration, Contains, Equals, Everything, LinearStoragePrice, Nothing, + }, +}; +use frame_system::EnsureRoot; +use pallet_staking_async_rc_runtime_constants::{ + currency::CENTS, system_parachain::*, xcm::body::FELLOWSHIP_ADMIN_INDEX, +}; +use pallet_xcm::XcmPassthrough; +use polkadot_runtime_common::{ + xcm_sender::{ChildParachainRouter, ExponentialPrice}, + ToAuthor, +}; +use sp_core::ConstU32; +use xcm::latest::{prelude::*, WESTEND_GENESIS_HASH}; +use xcm_builder::{ + AccountId32Aliases, AliasChildLocation, AllowExplicitUnpaidExecutionFrom, + AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, + ChildParachainAsNative, ChildParachainConvertsVia, DescribeAllTerminal, DescribeFamily, + FrameTransactionalProcessor, FungibleAdapter, HashedDescription, IsChildSystemParachain, + IsConcrete, MintLocation, OriginToPluralityVoice, SendXcmFeeToAccount, + SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, + TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, + XcmFeeManagerFromComponents, +}; +use xcm_executor::XcmExecutor; + +parameter_types! 
{ + pub const TokenLocation: Location = Here.into_location(); + pub const RootLocation: Location = Location::here(); + pub const ThisNetwork: NetworkId = ByGenesis(WESTEND_GENESIS_HASH); + pub UniversalLocation: InteriorLocation = [GlobalConsensus(ThisNetwork::get())].into(); + pub CheckAccount: AccountId = XcmPallet::check_account(); + pub LocalCheckAccount: (AccountId, MintLocation) = (CheckAccount::get(), MintLocation::Local); + pub TreasuryAccount: AccountId = Treasury::account_id(); + /// The asset ID for the asset that we use to pay for message delivery fees. + pub FeeAssetId: AssetId = AssetId(TokenLocation::get()); + /// The base fee for the message delivery fees. + pub const BaseDeliveryFee: u128 = CENTS.saturating_mul(3); +} + +pub type LocationConverter = ( + // We can convert a child parachain using the standard `AccountId` conversion. + ChildParachainConvertsVia, + // We can directly alias an `AccountId32` into a local account. + AccountId32Aliases, + // Foreign locations alias into accounts according to a hash of their standard description. + HashedDescription>, +); + +pub type LocalAssetTransactor = FungibleAdapter< + // Use this currency: + Balances, + // Use this currency when it is a fungible asset matching the given location or name: + IsConcrete, + // We can convert the Locations with our converter above: + LocationConverter, + // Our chain's account ID type (we can't get away without mentioning it explicitly): + AccountId, + // It's a native asset so we keep track of the teleports to maintain total issuance. + LocalCheckAccount, +>; + +type LocalOriginConverter = ( + // If the origin kind is `Sovereign`, then return a `Signed` origin with the account determined + // by the `LocationConverter` converter. + SovereignSignedViaLocation, + // If the origin kind is `Native` and the XCM origin is a child parachain, then we can express + // it with the special `parachains_origin::Origin` origin variant. 
+ ChildParachainAsNative, + // If the origin kind is `Native` and the XCM origin is the `AccountId32` location, then it can + // be expressed using the `Signed` origin variant. + SignedAccountId32AsNative, + // Xcm origins can be represented natively under the Xcm pallet's Xcm origin. + XcmPassthrough, +); + +pub type PriceForChildParachainDelivery = + ExponentialPrice; + +/// The XCM router. When we want to send an XCM message, we use this type. It amalgamates all of our +/// individual routers. +pub type XcmRouter = WithUniqueTopic< + // Only one router so far - use DMP to communicate with child parachains. + ChildParachainRouter, +>; + +parameter_types! { + pub AssetHub: Location = Parachain(ASSET_HUB_ID).into_location(); + pub Collectives: Location = Parachain(COLLECTIVES_ID).into_location(); + pub BridgeHub: Location = Parachain(BRIDGE_HUB_ID).into_location(); + pub Encointer: Location = Parachain(ENCOINTER_ID).into_location(); + pub People: Location = Parachain(PEOPLE_ID).into_location(); + pub Broker: Location = Parachain(BROKER_ID).into_location(); + pub Wnd: AssetFilter = Wild(AllOf { fun: WildFungible, id: AssetId(TokenLocation::get()) }); + pub WndForAssetHub: (AssetFilter, Location) = (Wnd::get(), AssetHub::get()); + pub WndForCollectives: (AssetFilter, Location) = (Wnd::get(), Collectives::get()); + pub WndForBridgeHub: (AssetFilter, Location) = (Wnd::get(), BridgeHub::get()); + pub WndForEncointer: (AssetFilter, Location) = (Wnd::get(), Encointer::get()); + pub WndForPeople: (AssetFilter, Location) = (Wnd::get(), People::get()); + pub WndForBroker: (AssetFilter, Location) = (Wnd::get(), Broker::get()); + pub MaxInstructions: u32 = 100; + pub MaxAssetsIntoHolding: u32 = 64; +} + +pub type TrustedTeleporters = ( + xcm_builder::Case, + xcm_builder::Case, + xcm_builder::Case, + xcm_builder::Case, + xcm_builder::Case, + xcm_builder::Case, +); + +pub struct OnlyParachains; +impl Contains for OnlyParachains { + fn contains(location: &Location) -> bool { + 
matches!(location.unpack(), (0, [Parachain(_)])) + } +} + +pub struct Fellows; +impl Contains for Fellows { + fn contains(location: &Location) -> bool { + matches!( + location.unpack(), + (0, [Parachain(COLLECTIVES_ID), Plurality { id: BodyId::Technical, .. }]) + ) + } +} + +pub struct LocalPlurality; +impl Contains for LocalPlurality { + fn contains(loc: &Location) -> bool { + matches!(loc.unpack(), (0, [Plurality { .. }])) + } +} + +/// The barriers one of which must be passed for an XCM message to be executed. +pub type Barrier = TrailingSetTopicAsId<( + // Weight that is paid for may be consumed. + TakeWeightCredit, + // Expected responses are OK. + AllowKnownQueryResponses, + WithComputedOrigin< + ( + // If the message is one that immediately attempts to pay for execution, then allow it. + AllowTopLevelPaidExecutionFrom, + // Subscriptions for version tracking are OK. + AllowSubscriptionsFrom, + // Messages from system parachains or the Fellows plurality need not pay for execution. + AllowExplicitUnpaidExecutionFrom<(IsChildSystemParachain, Fellows)>, + ), + UniversalLocation, + ConstU32<8>, + >, +)>; + +/// Locations that will not be charged fees in the executor, neither for execution nor delivery. +/// We only waive fees for system functions, which these locations represent. +pub type WaivedLocations = (SystemParachains, Equals, LocalPlurality); + +/// We let locations alias into child locations of their own. +/// This is a very simple aliasing rule, mimicking the behaviour of +/// the `DescendOrigin` instruction. 
+pub type Aliasers = AliasChildLocation; + +pub struct XcmConfig; +impl xcm_executor::Config for XcmConfig { + type RuntimeCall = RuntimeCall; + type XcmSender = XcmRouter; + type AssetTransactor = LocalAssetTransactor; + type OriginConverter = LocalOriginConverter; + type IsReserve = (); + type XcmEventEmitter = XcmPallet; + type IsTeleporter = TrustedTeleporters; + type UniversalLocation = UniversalLocation; + type Barrier = Barrier; + type Weigher = WeightInfoBounds< + crate::weights::xcm::WestendXcmWeight, + RuntimeCall, + MaxInstructions, + >; + type Trader = + UsingComponents>; + type ResponseHandler = XcmPallet; + type AssetTrap = XcmPallet; + type AssetLocker = (); + type AssetExchanger = (); + type AssetClaims = XcmPallet; + type SubscriptionService = XcmPallet; + type PalletInstancesInfo = AllPalletsWithSystem; + type MaxAssetsIntoHolding = MaxAssetsIntoHolding; + type FeeManager = XcmFeeManagerFromComponents< + WaivedLocations, + SendXcmFeeToAccount, + >; + type MessageExporter = (); + type UniversalAliases = Nothing; + type CallDispatcher = RuntimeCall; + type SafeCallFilter = Everything; + type Aliasers = Aliasers; + type TransactionalProcessor = FrameTransactionalProcessor; + type HrmpNewChannelOpenRequestHandler = (); + type HrmpChannelAcceptedHandler = (); + type HrmpChannelClosingHandler = (); + type XcmRecorder = XcmPallet; +} + +parameter_types! { + // `GeneralAdmin` pluralistic body. + pub const GeneralAdminBodyId: BodyId = BodyId::Administration; + // StakingAdmin pluralistic body. + pub const StakingAdminBodyId: BodyId = BodyId::Defense; + // FellowshipAdmin pluralistic body. + pub const FellowshipAdminBodyId: BodyId = BodyId::Index(FELLOWSHIP_ADMIN_INDEX); + // `Treasurer` pluralistic body. 
+ pub const TreasurerBodyId: BodyId = BodyId::Treasury; + + pub const DepositPerItem: Balance = crate::deposit(1, 0); + pub const DepositPerByte: Balance = crate::deposit(0, 1); + pub const AuthorizeAliasHoldReason: RuntimeHoldReason = RuntimeHoldReason::XcmPallet(pallet_xcm::HoldReason::AuthorizeAlias); +} + +/// Type to convert the `GeneralAdmin` origin to a Plurality `Location` value. +pub type GeneralAdminToPlurality = + OriginToPluralityVoice; + +/// location of this chain. +pub type LocalOriginToLocation = ( + GeneralAdminToPlurality, + // And a usual Signed origin to be used in XCM as a corresponding AccountId32 + SignedToAccountId32, +); + +/// Type to convert the `StakingAdmin` origin to a Plurality `Location` value. +pub type StakingAdminToPlurality = + OriginToPluralityVoice; + +/// Type to convert the `FellowshipAdmin` origin to a Plurality `Location` value. +pub type FellowshipAdminToPlurality = + OriginToPluralityVoice; + +/// Type to convert the `Treasurer` origin to a Plurality `Location` value. +pub type TreasurerToPlurality = OriginToPluralityVoice; + +/// Type to convert a pallet `Origin` type value into a `Location` value which represents an +/// interior location of this chain for a destination chain. +pub type LocalPalletOriginToLocation = ( + // GeneralAdmin origin to be used in XCM as a corresponding Plurality `Location` value. + GeneralAdminToPlurality, + // StakingAdmin origin to be used in XCM as a corresponding Plurality `Location` value. + StakingAdminToPlurality, + // FellowshipAdmin origin to be used in XCM as a corresponding Plurality `Location` value. + FellowshipAdminToPlurality, + // `Treasurer` origin to be used in XCM as a corresponding Plurality `Location` value. + TreasurerToPlurality, +); + +impl pallet_xcm::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + // Note that this configuration of `SendXcmOrigin` is different from the one present in + // production. 
+ type SendXcmOrigin = xcm_builder::EnsureXcmOrigin< + RuntimeOrigin, + (LocalPalletOriginToLocation, LocalOriginToLocation), + >; + type XcmRouter = XcmRouter; + // Anyone can execute XCM messages locally. + type ExecuteXcmOrigin = xcm_builder::EnsureXcmOrigin; + type XcmExecuteFilter = Everything; + type XcmExecutor = XcmExecutor; + type XcmTeleportFilter = Everything; + type XcmReserveTransferFilter = Everything; + type Weigher = WeightInfoBounds< + crate::weights::xcm::WestendXcmWeight, + RuntimeCall, + MaxInstructions, + >; + type UniversalLocation = UniversalLocation; + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 100; + type AdvertisedXcmVersion = pallet_xcm::CurrentXcmVersion; + type Currency = Balances; + type CurrencyMatcher = IsConcrete; + type TrustedLockers = (); + type SovereignAccountOf = LocationConverter; + type MaxLockers = ConstU32<8>; + type MaxRemoteLockConsumers = ConstU32<0>; + type RemoteLockConsumerIdentifier = (); + type WeightInfo = crate::weights::pallet_xcm::WeightInfo; + type AdminOrigin = EnsureRoot; + type AuthorizedAliasConsideration = HoldConsideration< + AccountId, + Balances, + AuthorizeAliasHoldReason, + LinearStoragePrice, + >; +} diff --git a/substrate/frame/staking-async/src/asset.rs b/substrate/frame/staking-async/src/asset.rs new file mode 100644 index 0000000000000..591ffc06eec5d --- /dev/null +++ b/substrate/frame/staking-async/src/asset.rs @@ -0,0 +1,163 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Contains all the interactions with [`Config::Currency`] to manipulate the underlying staking +//! asset. + +use crate::{BalanceOf, Config, HoldReason, NegativeImbalanceOf, PositiveImbalanceOf}; +use frame_support::traits::{ + fungible::{ + hold::{Balanced as FunHoldBalanced, Inspect as FunHoldInspect, Mutate as FunHoldMutate}, + Balanced, Inspect as FunInspect, + }, + tokens::{Fortitude, Precision, Preservation}, +}; +use sp_runtime::{DispatchResult, Saturating}; + +/// Existential deposit for the chain. +pub fn existential_deposit() -> BalanceOf { + T::Currency::minimum_balance() +} + +/// Total issuance of the chain. +pub fn total_issuance() -> BalanceOf { + T::Currency::total_issuance() +} + +/// Total balance of `who`. Includes both free and staked. +pub fn total_balance(who: &T::AccountId) -> BalanceOf { + T::Currency::total_balance(who) +} + +/// Stakeable balance of `who`. +/// +/// This includes balance free to stake along with any balance that is already staked. +pub fn stakeable_balance(who: &T::AccountId) -> BalanceOf { + free_to_stake::(who).saturating_add(staked::(who)) +} + +/// Balance of `who` that is currently at stake. +/// +/// The staked amount is on hold and cannot be transferred out of `who`s account. +pub fn staked(who: &T::AccountId) -> BalanceOf { + T::Currency::balance_on_hold(&HoldReason::Staking.into(), who) +} + +/// Balance of who that can be staked additionally. +/// +/// Does not include the current stake. 
+pub fn free_to_stake(who: &T::AccountId) -> BalanceOf { + // since we want to be able to use frozen funds for staking, we force the reduction. + T::Currency::reducible_balance(who, Preservation::Preserve, Fortitude::Force) +} + +/// Update `amount` at stake for `who`. +/// +/// Overwrites the existing stake amount. If passed amount is lower than the existing stake, the +/// difference is unlocked. +pub fn update_stake(who: &T::AccountId, amount: BalanceOf) -> DispatchResult { + T::Currency::set_on_hold(&HoldReason::Staking.into(), who, amount) +} + +/// Release all staked amount to `who`. +/// +/// Fails if there are consumers left on `who` that restricts it from being reaped. +pub fn kill_stake(who: &T::AccountId) -> DispatchResult { + T::Currency::release_all(&HoldReason::Staking.into(), who, Precision::BestEffort).map(|_| ()) +} + +/// Slash the value from `who`. +/// +/// A negative imbalance is returned which can be resolved to deposit the slashed value. +pub fn slash( + who: &T::AccountId, + value: BalanceOf, +) -> (NegativeImbalanceOf, BalanceOf) { + T::Currency::slash(&HoldReason::Staking.into(), who, value) +} + +/// Mint `value` into an existing account `who`. +/// +/// This does not increase the total issuance. +pub fn mint_into_existing( + who: &T::AccountId, + value: BalanceOf, +) -> Option> { + // since the account already exists, we mint exact value even if value is below ED. + T::Currency::deposit(who, value, Precision::Exact).ok() +} + +/// Mint `value` and create account for `who` if it does not exist. +/// +/// If value is below existential deposit, the account is not created. +/// +/// Note: This does not increase the total issuance. +pub fn mint_creating(who: &T::AccountId, value: BalanceOf) -> PositiveImbalanceOf { + T::Currency::deposit(who, value, Precision::BestEffort).unwrap_or_default() +} + +/// Deposit newly issued or slashed `value` into `who`. 
+pub fn deposit_slashed(who: &T::AccountId, value: NegativeImbalanceOf) { + let _ = T::Currency::resolve(who, value); +} + +/// Issue `value` increasing total issuance. +/// +/// Creates a negative imbalance. +pub fn issue(value: BalanceOf) -> NegativeImbalanceOf { + T::Currency::issue(value) +} + +/// Burn the amount from the total issuance. +#[cfg(feature = "runtime-benchmarks")] +pub fn burn(amount: BalanceOf) -> PositiveImbalanceOf { + T::Currency::rescind(amount) +} + +/// Set balance that can be staked for `who`. +/// +/// If `Value` is lower than the current staked balance, the difference is unlocked. +/// +/// Should only be used with test. +#[cfg(any(test, feature = "runtime-benchmarks"))] +pub fn set_stakeable_balance(who: &T::AccountId, value: BalanceOf) { + use frame_support::traits::fungible::Mutate; + + // minimum free balance (non-staked) required to keep the account alive. + let ed = existential_deposit::(); + // currently on stake + let staked_balance = staked::(who); + + // if new value is greater than staked balance, mint some free balance. + if value > staked_balance { + let _ = T::Currency::set_balance(who, value - staked_balance + ed); + } else { + // else reduce the staked balance. + update_stake::(who, value).expect("can remove from what is staked"); + // burn all free, only leaving ED. + let _ = T::Currency::set_balance(who, ed); + } + + // ensure new stakeable balance same as desired `value`. + assert_eq!(stakeable_balance::(who), value); +} + +/// Return the amount staked and available to stake in one tuple. +#[cfg(test)] +pub fn staked_and_not(who: &T::AccountId) -> (BalanceOf, BalanceOf) { + (staked::(who), free_to_stake::(who)) +} diff --git a/substrate/frame/staking-async/src/benchmarking.rs b/substrate/frame/staking-async/src/benchmarking.rs new file mode 100644 index 0000000000000..cab409ba9e408 --- /dev/null +++ b/substrate/frame/staking-async/src/benchmarking.rs @@ -0,0 +1,1309 @@ +// This file is part of Substrate. 
+ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Staking pallet benchmarking. + +use super::*; +use crate::{ + asset, + session_rotation::{Eras, Rotator}, + ConfigOp, Pallet as Staking, +}; +use codec::Decode; +pub use frame_benchmarking::{ + impl_benchmark_test_suite, v2::*, whitelist_account, whitelisted_caller, BenchmarkError, +}; +use frame_election_provider_support::SortedListProvider; +use frame_support::{pallet_prelude::*, storage::bounded_vec::BoundedVec, traits::Get}; +use frame_system::RawOrigin; +use pallet_staking_async_rc_client as rc_client; +use sp_runtime::{ + traits::{Bounded, One, StaticLookup, TrailingZeroInput, Zero}, + Perbill, Percent, Saturating, +}; +use sp_staking::currency_to_vote::CurrencyToVote; +use testing_utils::*; + +const SEED: u32 = 0; +const MAX_SPANS: u32 = 100; +const MAX_SLASHES: u32 = 1000; + +// Add slashing spans to a user account. Not relevant for actual use, only to benchmark +// read and write operations. 
+pub(crate) fn add_slashing_spans(who: &T::AccountId, spans: u32) { + if spans == 0 { + return + } + + // For the first slashing span, we initialize + let mut slashing_spans = crate::slashing::SlashingSpans::new(0); + SpanSlash::::insert((who, 0), crate::slashing::SpanRecord::default()); + + for i in 1..spans { + assert!(slashing_spans.end_span(i)); + SpanSlash::::insert((who, i), crate::slashing::SpanRecord::default()); + } + SlashingSpans::::insert(who, slashing_spans); +} + +// This function clears all existing validators and nominators from the set, and generates one new +// validator being nominated by n nominators, and returns the validator stash account and the +// nominators' stash and controller. It also starts plans a new era with this new stakers, and +// returns the planned era index. +pub(crate) fn create_validator_with_nominators( + n: u32, + upper_bound: u32, + dead_controller: bool, + unique_controller: bool, + destination: RewardDestination, +) -> Result<(T::AccountId, Vec<(T::AccountId, T::AccountId)>, EraIndex), &'static str> { + // TODO: this can be replaced with `testing_utils` version? + // Clean up any existing state. + clear_validators_and_nominators::(); + let mut points_total = 0; + let mut points_individual = Vec::new(); + + let (v_stash, v_controller) = if unique_controller { + create_unique_stash_controller::(0, 100, destination.clone(), false)? + } else { + create_stash_controller::(0, 100, destination.clone())? + }; + + let validator_prefs = + ValidatorPrefs { commission: Perbill::from_percent(50), ..Default::default() }; + Staking::::validate(RawOrigin::Signed(v_controller).into(), validator_prefs)?; + let stash_lookup = T::Lookup::unlookup(v_stash.clone()); + + points_total += 10; + points_individual.push((v_stash.clone(), 10)); + + let original_nominator_count = Nominators::::count(); + let mut nominators = Vec::new(); + + // Give the validator n nominators, but keep total users in the system the same. 
+ for i in 0..upper_bound { + let (n_stash, n_controller) = if !dead_controller { + create_stash_controller::(u32::MAX - i, 100, destination.clone())? + } else { + create_unique_stash_controller::(u32::MAX - i, 100, destination.clone(), true)? + }; + if i < n { + Staking::::nominate( + RawOrigin::Signed(n_controller.clone()).into(), + vec![stash_lookup.clone()], + )?; + nominators.push((n_stash, n_controller)); + } + } + + ValidatorCount::::put(1); + + // Start a new Era + let new_validators = Rotator::::legacy_insta_plan_era(); + let planned_era = CurrentEra::::get().unwrap_or_default(); + + assert_eq!(new_validators.len(), 1, "New validators is not 1"); + assert_eq!(new_validators[0], v_stash, "Our validator was not selected"); + assert_ne!(Validators::::count(), 0, "New validators count wrong"); + assert_eq!( + Nominators::::count(), + original_nominator_count + nominators.len() as u32, + "New nominators count wrong" + ); + + // Give Era Points + let reward = EraRewardPoints:: { + total: points_total, + individual: points_individual.into_iter().collect(), + }; + + ErasRewardPoints::::insert(planned_era, reward); + + // Create reward pool + let total_payout = asset::existential_deposit::() + .saturating_mul(upper_bound.into()) + .saturating_mul(1000u32.into()); + >::insert(planned_era, total_payout); + + Ok((v_stash, nominators, planned_era)) +} + +struct ListScenario { + /// Stash that is expected to be moved. + origin_stash1: T::AccountId, + /// Controller of the Stash that is expected to be moved. + origin_controller1: T::AccountId, + dest_weight: BalanceOf, +} + +impl ListScenario { + /// An expensive scenario for bags-list implementation: + /// + /// - the node to be updated (r) is the head of a bag that has at least one other node. The bag + /// itself will need to be read and written to update its head. The node pointed to by r.next + /// will need to be read and written as it will need to have its prev pointer updated. 
Note + /// that there are two other worst case scenarios for bag removal: 1) the node is a tail and + /// 2) the node is a middle node with prev and next; all scenarios end up with the same number + /// of storage reads and writes. + /// + /// - the destination bag has at least one node, which will need its next pointer updated. + /// + /// NOTE: while this scenario specifically targets a worst case for the bags-list, it should + /// also elicit a worst case for other known `VoterList` implementations; although + /// this may not be true against unknown `VoterList` implementations. + fn new(origin_weight: BalanceOf, is_increase: bool) -> Result { + ensure!(!origin_weight.is_zero(), "origin weight must be greater than 0"); + + // burn the entire issuance. + let i = asset::burn::(asset::total_issuance::()); + core::mem::forget(i); + + // create accounts with the origin weight + + let (origin_stash1, origin_controller1) = create_stash_controller_with_balance::( + USER_SEED + 2, + origin_weight, + RewardDestination::Staked, + )?; + Staking::::nominate( + RawOrigin::Signed(origin_controller1.clone()).into(), + // NOTE: these don't really need to be validators. 
+ vec![T::Lookup::unlookup(account("random_validator", 0, SEED))], + )?; + + let (_origin_stash2, origin_controller2) = create_stash_controller_with_balance::( + USER_SEED + 3, + origin_weight, + RewardDestination::Staked, + )?; + Staking::::nominate( + RawOrigin::Signed(origin_controller2).into(), + vec![T::Lookup::unlookup(account("random_validator", 0, SEED))], + )?; + + // find a destination weight that will trigger the worst case scenario + let dest_weight_as_vote = + T::VoterList::score_update_worst_case(&origin_stash1, is_increase); + + let total_issuance = asset::total_issuance::(); + + let dest_weight = + T::CurrencyToVote::to_currency(dest_weight_as_vote as u128, total_issuance); + + // create an account with the worst case destination weight + let (_dest_stash1, dest_controller1) = create_stash_controller_with_balance::( + USER_SEED + 1, + dest_weight, + RewardDestination::Staked, + )?; + Staking::::nominate( + RawOrigin::Signed(dest_controller1).into(), + vec![T::Lookup::unlookup(account("random_validator", 0, SEED))], + )?; + + Ok(ListScenario { origin_stash1, origin_controller1, dest_weight }) + } +} + +const USER_SEED: u32 = 999666; + +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn bond() { + let stash = create_funded_user::("stash", USER_SEED, 100); + let reward_destination = RewardDestination::Staked; + let amount = asset::existential_deposit::() * 10u32.into(); + whitelist_account!(stash); + + #[extrinsic_call] + _(RawOrigin::Signed(stash.clone()), amount, reward_destination); + + assert!(Bonded::::contains_key(stash.clone())); + assert!(Ledger::::contains_key(stash)); + } + + #[benchmark] + fn bond_extra() -> Result<(), BenchmarkError> { + // clean up any existing state. + clear_validators_and_nominators::(); + + let origin_weight = MinNominatorBond::::get().max(asset::existential_deposit::()); + + // setup the worst case list scenario. + + // the weight the nominator will start at. 
+ let scenario = ListScenario::::new(origin_weight, true)?; + + let max_additional = scenario.dest_weight - origin_weight; + + let stash = scenario.origin_stash1.clone(); + let controller = scenario.origin_controller1; + let original_bonded: BalanceOf = Ledger::::get(&controller) + .map(|l| l.active) + .ok_or("ledger not created after")?; + + let _ = asset::mint_into_existing::( + &stash, + max_additional + asset::existential_deposit::(), + ) + .unwrap(); + + whitelist_account!(stash); + + #[extrinsic_call] + _(RawOrigin::Signed(stash), max_additional); + + let ledger = Ledger::::get(&controller).ok_or("ledger not created after")?; + let new_bonded: BalanceOf = ledger.active; + assert!(original_bonded < new_bonded); + + Ok(()) + } + + #[benchmark] + fn unbond() -> Result<(), BenchmarkError> { + // clean up any existing state. + clear_validators_and_nominators::(); + + // the weight the nominator will start at. The value used here is expected to be + // significantly higher than the first position in a list (e.g. the first bag threshold). 
+ let origin_weight = BalanceOf::::try_from(952_994_955_240_703u128) + .map_err(|_| "balance expected to be a u128") + .unwrap(); + let scenario = ListScenario::::new(origin_weight, false)?; + + let controller = scenario.origin_controller1.clone(); + let amount = origin_weight - scenario.dest_weight; + let ledger = Ledger::::get(&controller).ok_or("ledger not created before")?; + let original_bonded: BalanceOf = ledger.active; + + whitelist_account!(controller); + + #[extrinsic_call] + _(RawOrigin::Signed(controller.clone()), amount); + + let ledger = Ledger::::get(&controller).ok_or("ledger not created after")?; + let new_bonded: BalanceOf = ledger.active; + assert!(original_bonded > new_bonded); + + Ok(()) + } + + #[benchmark] + // Withdraw only updates the ledger + fn withdraw_unbonded_update( + // Slashing Spans + s: Linear<0, MAX_SPANS>, + ) -> Result<(), BenchmarkError> { + let (stash, controller) = create_stash_controller::(0, 100, RewardDestination::Staked)?; + add_slashing_spans::(&stash, s); + let amount = asset::existential_deposit::() * 5u32.into(); // Half of total + Staking::::unbond(RawOrigin::Signed(controller.clone()).into(), amount)?; + CurrentEra::::put(EraIndex::max_value()); + let ledger = Ledger::::get(&controller).ok_or("ledger not created before")?; + let original_total: BalanceOf = ledger.total; + whitelist_account!(controller); + + #[extrinsic_call] + withdraw_unbonded(RawOrigin::Signed(controller.clone()), s); + + let ledger = Ledger::::get(&controller).ok_or("ledger not created after")?; + let new_total: BalanceOf = ledger.total; + assert!(original_total > new_total); + + Ok(()) + } + + #[benchmark] + // Worst case scenario, everything is removed after the bonding duration + fn withdraw_unbonded_kill( + // Slashing Spans + s: Linear<0, MAX_SPANS>, + ) -> Result<(), BenchmarkError> { + // clean up any existing state. 
+ clear_validators_and_nominators::(); + + let origin_weight = MinNominatorBond::::get().max(asset::existential_deposit::()); + + // setup a worst case list scenario. Note that we don't care about the setup of the + // destination position because we are doing a removal from the list but no insert. + let scenario = ListScenario::::new(origin_weight, true)?; + let controller = scenario.origin_controller1.clone(); + let stash = scenario.origin_stash1; + add_slashing_spans::(&stash, s); + assert!(T::VoterList::contains(&stash)); + + let ed = asset::existential_deposit::(); + let mut ledger = Ledger::::get(&controller).unwrap(); + ledger.active = ed - One::one(); + Ledger::::insert(&controller, ledger); + CurrentEra::::put(EraIndex::max_value()); + + whitelist_account!(controller); + + #[extrinsic_call] + withdraw_unbonded(RawOrigin::Signed(controller.clone()), s); + + assert!(!Ledger::::contains_key(controller)); + assert!(!T::VoterList::contains(&stash)); + + Ok(()) + } + + #[benchmark] + fn validate() -> Result<(), BenchmarkError> { + let (stash, controller) = create_stash_controller::( + MaxNominationsOf::::get() - 1, + 100, + RewardDestination::Staked, + )?; + // because it is chilled. + assert!(!T::VoterList::contains(&stash)); + + let prefs = ValidatorPrefs::default(); + whitelist_account!(controller); + + #[extrinsic_call] + _(RawOrigin::Signed(controller), prefs); + + assert!(Validators::::contains_key(&stash)); + assert!(T::VoterList::contains(&stash)); + + Ok(()) + } + + #[benchmark] + fn kick( + // scenario: we want to kick `k` nominators from nominating us (we are a validator). + // we'll assume that `k` is under 128 for the purposes of determining the slope. + // each nominator should have `T::MaxNominations::get()` validators nominated, and our + // validator should be somewhere in there. 
+ k: Linear<1, 128>, + ) -> Result<(), BenchmarkError> { + // these are the other validators; there are `T::MaxNominations::get() - 1` of them, so + // there are a total of `T::MaxNominations::get()` validators in the system. + let rest_of_validators = + create_validators_with_seed::(MaxNominationsOf::::get() - 1, 100, 415)?; + + // this is the validator that will be kicking. + let (stash, controller) = create_stash_controller::( + MaxNominationsOf::::get() - 1, + 100, + RewardDestination::Staked, + )?; + let stash_lookup = T::Lookup::unlookup(stash.clone()); + + // they start validating. + Staking::::validate(RawOrigin::Signed(controller.clone()).into(), Default::default())?; + + // we now create the nominators. there will be `k` of them; each will nominate all + // validators. we will then kick each of the `k` nominators from the main validator. + let mut nominator_stashes = Vec::with_capacity(k as usize); + for i in 0..k { + // create a nominator stash. + let (n_stash, n_controller) = create_stash_controller::( + MaxNominationsOf::::get() + i, + 100, + RewardDestination::Staked, + )?; + + // bake the nominations; we first clone them from the rest of the validators. + let mut nominations = rest_of_validators.clone(); + // then insert "our" validator somewhere in there (we vary it) to avoid accidental + // optimisations/pessimisations. + nominations.insert(i as usize % (nominations.len() + 1), stash_lookup.clone()); + // then we nominate. + Staking::::nominate(RawOrigin::Signed(n_controller.clone()).into(), nominations)?; + + nominator_stashes.push(n_stash); + } + + // all nominators now should be nominating our validator... + for n in nominator_stashes.iter() { + assert!(Nominators::::get(n).unwrap().targets.contains(&stash)); + } + + // we need the unlookuped version of the nominator stash for the kick. 
+ let kicks = nominator_stashes + .iter() + .map(|n| T::Lookup::unlookup(n.clone())) + .collect::>(); + + whitelist_account!(controller); + + #[extrinsic_call] + _(RawOrigin::Signed(controller), kicks); + + // all nominators now should *not* be nominating our validator... + for n in nominator_stashes.iter() { + assert!(!Nominators::::get(n).unwrap().targets.contains(&stash)); + } + + Ok(()) + } + + #[benchmark] + // Worst case scenario, T::MaxNominations::get() + fn nominate(n: Linear<1, { MaxNominationsOf::::get() }>) -> Result<(), BenchmarkError> { + // clean up any existing state. + clear_validators_and_nominators::(); + + let origin_weight = MinNominatorBond::::get().max(asset::existential_deposit::()); + + // setup a worst case list scenario. Note we don't care about the destination position, + // because we are just doing an insert into the origin position. + ListScenario::::new(origin_weight, true)?; + let (stash, controller) = create_stash_controller_with_balance::( + SEED + MaxNominationsOf::::get() + 1, /* make sure the account does not conflict + * with others */ + origin_weight, + RewardDestination::Staked, + ) + .unwrap(); + + assert!(!Nominators::::contains_key(&stash)); + assert!(!T::VoterList::contains(&stash)); + + let validators = create_validators::(n, 100).unwrap(); + whitelist_account!(controller); + + #[extrinsic_call] + _(RawOrigin::Signed(controller), validators); + + assert!(Nominators::::contains_key(&stash)); + assert!(T::VoterList::contains(&stash)); + + Ok(()) + } + + #[benchmark] + fn chill() -> Result<(), BenchmarkError> { + // clean up any existing state. + clear_validators_and_nominators::(); + + let origin_weight = MinNominatorBond::::get().max(asset::existential_deposit::()); + + // setup a worst case list scenario. Note that we don't care about the setup of the + // destination position because we are doing a removal from the list but no insert. 
+ let scenario = ListScenario::::new(origin_weight, true)?; + let controller = scenario.origin_controller1.clone(); + let stash = scenario.origin_stash1; + assert!(T::VoterList::contains(&stash)); + + whitelist_account!(controller); + + #[extrinsic_call] + _(RawOrigin::Signed(controller)); + + assert!(!T::VoterList::contains(&stash)); + + Ok(()) + } + + #[benchmark] + fn set_payee() -> Result<(), BenchmarkError> { + let (stash, controller) = + create_stash_controller::(USER_SEED, 100, RewardDestination::Staked)?; + assert_eq!(Payee::::get(&stash), Some(RewardDestination::Staked)); + whitelist_account!(controller); + + #[extrinsic_call] + _(RawOrigin::Signed(controller.clone()), RewardDestination::Account(controller.clone())); + + assert_eq!(Payee::::get(&stash), Some(RewardDestination::Account(controller))); + + Ok(()) + } + + #[benchmark] + fn update_payee() -> Result<(), BenchmarkError> { + let (stash, controller) = + create_stash_controller::(USER_SEED, 100, RewardDestination::Staked)?; + Payee::::insert(&stash, { + #[allow(deprecated)] + RewardDestination::Controller + }); + whitelist_account!(controller); + + #[extrinsic_call] + _(RawOrigin::Signed(controller.clone()), controller.clone()); + + assert_eq!(Payee::::get(&stash), Some(RewardDestination::Account(controller))); + + Ok(()) + } + + #[benchmark] + fn set_controller() -> Result<(), BenchmarkError> { + let (stash, ctlr) = + create_unique_stash_controller::(9000, 100, RewardDestination::Staked, false)?; + // ensure `ctlr` is the currently stored controller. 
+ assert!(!Ledger::::contains_key(&stash)); + assert!(Ledger::::contains_key(&ctlr)); + assert_eq!(Bonded::::get(&stash), Some(ctlr.clone())); + + whitelist_account!(stash); + + #[extrinsic_call] + _(RawOrigin::Signed(stash.clone())); + + assert!(Ledger::::contains_key(&stash)); + + Ok(()) + } + + #[benchmark] + fn set_validator_count() { + let validator_count = T::MaxValidatorSet::get() - 1; + + #[extrinsic_call] + _(RawOrigin::Root, validator_count); + + assert_eq!(ValidatorCount::::get(), validator_count); + } + + #[benchmark] + fn force_no_eras() { + #[extrinsic_call] + _(RawOrigin::Root); + + assert_eq!(ForceEra::::get(), Forcing::ForceNone); + } + + #[benchmark] + fn force_new_era() { + #[extrinsic_call] + _(RawOrigin::Root); + + assert_eq!(ForceEra::::get(), Forcing::ForceNew); + } + + #[benchmark] + fn force_new_era_always() { + #[extrinsic_call] + _(RawOrigin::Root); + + assert_eq!(ForceEra::::get(), Forcing::ForceAlways); + } + + #[benchmark] + // Worst case scenario, the list of invulnerables is very long. + fn set_invulnerables(v: Linear<0, { T::MaxInvulnerables::get() }>) { + let mut invulnerables = Vec::new(); + for i in 0..v { + invulnerables.push(account("invulnerable", i, SEED)); + } + + #[extrinsic_call] + _(RawOrigin::Root, invulnerables); + + assert_eq!(Invulnerables::::get().len(), v as usize); + } + + #[benchmark] + fn deprecate_controller_batch( + // We pass a dynamic number of controllers to the benchmark, up to + // `MaxControllersInDeprecationBatch`. 
+ u: Linear<0, { T::MaxControllersInDeprecationBatch::get() }>, + ) -> Result<(), BenchmarkError> { + let mut controllers: Vec<_> = vec![]; + let mut stashes: Vec<_> = vec![]; + for i in 0..u as u32 { + let (stash, controller) = + create_unique_stash_controller::(i, 100, RewardDestination::Staked, false)?; + controllers.push(controller); + stashes.push(stash); + } + let bounded_controllers: BoundedVec<_, T::MaxControllersInDeprecationBatch> = + BoundedVec::try_from(controllers.clone()).unwrap(); + + #[extrinsic_call] + _(RawOrigin::Root, bounded_controllers); + + for i in 0..u as u32 { + let stash = &stashes[i as usize]; + let controller = &controllers[i as usize]; + // Ledger no longer keyed by controller. + assert_eq!(Ledger::::get(controller), None); + // Bonded now maps to the stash. + assert_eq!(Bonded::::get(stash), Some(stash.clone())); + // Ledger is now keyed by stash. + assert_eq!(Ledger::::get(stash).unwrap().stash, *stash); + } + + Ok(()) + } + + #[benchmark] + fn force_unstake( + // Slashing Spans + s: Linear<0, MAX_SPANS>, + ) -> Result<(), BenchmarkError> { + // Clean up any existing state. + clear_validators_and_nominators::(); + + let origin_weight = MinNominatorBond::::get().max(asset::existential_deposit::()); + + // setup a worst case list scenario. Note that we don't care about the setup of the + // destination position because we are doing a removal from the list but no insert. 
+ let scenario = ListScenario::::new(origin_weight, true)?; + let controller = scenario.origin_controller1.clone(); + let stash = scenario.origin_stash1; + assert!(T::VoterList::contains(&stash)); + add_slashing_spans::(&stash, s); + + #[extrinsic_call] + _(RawOrigin::Root, stash.clone(), s); + + assert!(!Ledger::::contains_key(&controller)); + assert!(!T::VoterList::contains(&stash)); + + Ok(()) + } + + #[benchmark] + fn cancel_deferred_slash(s: Linear<1, MAX_SLASHES>) { + let era = EraIndex::one(); + let dummy_account = || T::AccountId::decode(&mut TrailingZeroInput::zeroes()).unwrap(); + + // Insert `s` unapplied slashes with the new key structure + for i in 0..s { + let slash_key = (dummy_account(), Perbill::from_percent(i as u32 % 100), i); + let unapplied_slash = UnappliedSlash:: { + validator: slash_key.0.clone(), + own: Zero::zero(), + others: WeakBoundedVec::default(), + reporter: Default::default(), + payout: Zero::zero(), + }; + UnappliedSlashes::::insert(era, slash_key.clone(), unapplied_slash); + } + + let slash_keys: Vec<_> = (0..s) + .map(|i| (dummy_account(), Perbill::from_percent(i as u32 % 100), i)) + .collect(); + + #[extrinsic_call] + _(RawOrigin::Root, era, slash_keys.clone()); + + // Ensure all `s` slashes are removed + for key in &slash_keys { + assert!(UnappliedSlashes::::get(era, key).is_none()); + } + } + + #[benchmark] + fn payout_stakers_alive_staked( + n: Linear<0, { T::MaxExposurePageSize::get() as u32 }>, + ) -> Result<(), BenchmarkError> { + let (validator, nominators, current_era) = create_validator_with_nominators::( + n, + T::MaxExposurePageSize::get() as u32, + false, + true, + RewardDestination::Staked, + )?; + + // set the commission for this particular era as well. 
+ >::insert( + current_era, + validator.clone(), + Validators::::get(&validator), + ); + + let caller = whitelisted_caller(); + let balance_before = asset::stakeable_balance::(&validator); + let mut nominator_balances_before = Vec::new(); + for (stash, _) in &nominators { + let balance = asset::stakeable_balance::(stash); + nominator_balances_before.push(balance); + } + + #[extrinsic_call] + payout_stakers(RawOrigin::Signed(caller), validator.clone(), current_era); + + let balance_after = asset::stakeable_balance::(&validator); + ensure!( + balance_before < balance_after, + "Balance of validator stash should have increased after payout.", + ); + for ((stash, _), balance_before) in nominators.iter().zip(nominator_balances_before.iter()) + { + let balance_after = asset::stakeable_balance::(stash); + ensure!( + balance_before < &balance_after, + "Balance of nominator stash should have increased after payout.", + ); + } + + Ok(()) + } + + #[benchmark] + fn rebond(l: Linear<1, { T::MaxUnlockingChunks::get() as u32 }>) -> Result<(), BenchmarkError> { + // clean up any existing state. + clear_validators_and_nominators::(); + + let origin_weight = MinNominatorBond::::get() + .max(asset::existential_deposit::()) + // we use 100 to play friendly with the list threshold values in the mock + .max(100u32.into()); + + // setup a worst case list scenario. + let scenario = ListScenario::::new(origin_weight, true)?; + let dest_weight = scenario.dest_weight; + + // rebond an amount that will give the user dest_weight + let rebond_amount = dest_weight - origin_weight; + + // spread that amount to rebond across `l` unlocking chunks, + let value = rebond_amount / l.into(); + // if `value` is zero, we need a greater delta between dest <=> origin weight + assert_ne!(value, Zero::zero()); + // so the sum of unlocking chunks puts voter into the dest bag. 
+ assert!(value * l.into() + origin_weight > origin_weight); + assert!(value * l.into() + origin_weight <= dest_weight); + let unlock_chunk = UnlockChunk::> { value, era: EraIndex::zero() }; + + let controller = scenario.origin_controller1; + let mut staking_ledger = Ledger::::get(controller.clone()).unwrap(); + + for _ in 0..l { + staking_ledger.unlocking.try_push(unlock_chunk.clone()).unwrap() + } + Ledger::::insert(controller.clone(), staking_ledger.clone()); + let original_bonded: BalanceOf = staking_ledger.active; + + whitelist_account!(controller); + + #[extrinsic_call] + _(RawOrigin::Signed(controller.clone()), rebond_amount); + + let ledger = Ledger::::get(&controller).ok_or("ledger not created after")?; + let new_bonded: BalanceOf = ledger.active; + assert!(original_bonded < new_bonded); + + Ok(()) + } + + #[benchmark] + fn reap_stash(s: Linear<1, MAX_SPANS>) -> Result<(), BenchmarkError> { + // clean up any existing state. + clear_validators_and_nominators::(); + + let origin_weight = MinNominatorBond::::get().max(asset::existential_deposit::()); + + // setup a worst case list scenario. Note that we don't care about the setup of the + // destination position because we are doing a removal from the list but no insert. 
+ let scenario = ListScenario::::new(origin_weight, true)?; + let controller = scenario.origin_controller1.clone(); + let stash = scenario.origin_stash1; + + add_slashing_spans::(&stash, s); + let l = + StakingLedger::::new(stash.clone(), asset::existential_deposit::() - One::one()); + Ledger::::insert(&controller, l); + + assert!(Bonded::::contains_key(&stash)); + assert!(T::VoterList::contains(&stash)); + + whitelist_account!(controller); + + #[extrinsic_call] + _(RawOrigin::Signed(controller), stash.clone(), s); + + assert!(!Bonded::::contains_key(&stash)); + assert!(!T::VoterList::contains(&stash)); + + Ok(()) + } + + #[benchmark] + fn set_staking_configs_all_set() { + #[extrinsic_call] + set_staking_configs( + RawOrigin::Root, + ConfigOp::Set(BalanceOf::::max_value()), + ConfigOp::Set(BalanceOf::::max_value()), + ConfigOp::Set(u32::MAX), + ConfigOp::Set(u32::MAX), + ConfigOp::Set(Percent::max_value()), + ConfigOp::Set(Perbill::max_value()), + ConfigOp::Set(Percent::max_value()), + ); + + assert_eq!(MinNominatorBond::::get(), BalanceOf::::max_value()); + assert_eq!(MinValidatorBond::::get(), BalanceOf::::max_value()); + assert_eq!(MaxNominatorsCount::::get(), Some(u32::MAX)); + assert_eq!(MaxValidatorsCount::::get(), Some(u32::MAX)); + assert_eq!(ChillThreshold::::get(), Some(Percent::from_percent(100))); + assert_eq!(MinCommission::::get(), Perbill::from_percent(100)); + assert_eq!(MaxStakedRewards::::get(), Some(Percent::from_percent(100))); + } + + #[benchmark] + fn set_staking_configs_all_remove() { + #[extrinsic_call] + set_staking_configs( + RawOrigin::Root, + ConfigOp::Remove, + ConfigOp::Remove, + ConfigOp::Remove, + ConfigOp::Remove, + ConfigOp::Remove, + ConfigOp::Remove, + ConfigOp::Remove, + ); + + assert!(!MinNominatorBond::::exists()); + assert!(!MinValidatorBond::::exists()); + assert!(!MaxNominatorsCount::::exists()); + assert!(!MaxValidatorsCount::::exists()); + assert!(!ChillThreshold::::exists()); + assert!(!MinCommission::::exists()); + 
assert!(!MaxStakedRewards::::exists()); + } + + #[benchmark] + fn chill_other() -> Result<(), BenchmarkError> { + // clean up any existing state. + clear_validators_and_nominators::(); + + let origin_weight = MinNominatorBond::::get().max(asset::existential_deposit::()); + + // setup a worst case list scenario. Note that we don't care about the setup of the + // destination position because we are doing a removal from the list but no insert. + let scenario = ListScenario::::new(origin_weight, true)?; + let stash = scenario.origin_stash1; + assert!(T::VoterList::contains(&stash)); + + Staking::::set_staking_configs( + RawOrigin::Root.into(), + ConfigOp::Set(BalanceOf::::max_value()), + ConfigOp::Set(BalanceOf::::max_value()), + ConfigOp::Set(0), + ConfigOp::Set(0), + ConfigOp::Set(Percent::from_percent(0)), + ConfigOp::Set(Zero::zero()), + ConfigOp::Noop, + )?; + + let caller = whitelisted_caller(); + + #[extrinsic_call] + _(RawOrigin::Signed(caller), stash.clone()); + + assert!(!T::VoterList::contains(&stash)); + + Ok(()) + } + + #[benchmark] + fn force_apply_min_commission() -> Result<(), BenchmarkError> { + // Clean up any existing state + clear_validators_and_nominators::(); + + // Create a validator with a commission of 50% + let (stash, controller) = create_stash_controller::(1, 1, RewardDestination::Staked)?; + let validator_prefs = + ValidatorPrefs { commission: Perbill::from_percent(50), ..Default::default() }; + Staking::::validate(RawOrigin::Signed(controller).into(), validator_prefs)?; + + // Sanity check + assert_eq!( + Validators::::get(&stash), + ValidatorPrefs { commission: Perbill::from_percent(50), ..Default::default() } + ); + + // Set the min commission to 75% + MinCommission::::set(Perbill::from_percent(75)); + let caller = whitelisted_caller(); + + #[extrinsic_call] + _(RawOrigin::Signed(caller), stash.clone()); + + // The validators commission has been bumped to 75% + assert_eq!( + Validators::::get(&stash), + ValidatorPrefs { commission: 
Perbill::from_percent(75), ..Default::default() } + ); + + Ok(()) + } + + #[benchmark] + fn set_min_commission() { + let min_commission = Perbill::max_value(); + + #[extrinsic_call] + _(RawOrigin::Root, min_commission); + + assert_eq!(MinCommission::::get(), Perbill::from_percent(100)); + } + + #[benchmark] + fn restore_ledger() -> Result<(), BenchmarkError> { + let (stash, controller) = create_stash_controller::(0, 100, RewardDestination::Staked)?; + // corrupt ledger. + Ledger::::remove(controller); + + #[extrinsic_call] + _(RawOrigin::Root, stash.clone(), None, None, None); + + assert_eq!(Staking::::inspect_bond_state(&stash), Ok(LedgerIntegrityState::Ok)); + + Ok(()) + } + + #[benchmark] + fn migrate_currency() -> Result<(), BenchmarkError> { + let (stash, _ctrl) = + create_stash_controller::(USER_SEED, 100, RewardDestination::Staked)?; + let stake = asset::staked::(&stash); + migrate_to_old_currency::(stash.clone()); + // no holds + assert!(asset::staked::(&stash).is_zero()); + whitelist_account!(stash); + + #[extrinsic_call] + _(RawOrigin::Signed(stash.clone()), stash.clone()); + + assert_eq!(asset::staked::(&stash), stake); + Ok(()) + } + + #[benchmark] + fn apply_slash() -> Result<(), BenchmarkError> { + let era = EraIndex::one(); + ActiveEra::::put(ActiveEraInfo { index: era, start: None }); + let (validator, nominators, _current_era) = create_validator_with_nominators::( + T::MaxExposurePageSize::get() as u32, + T::MaxExposurePageSize::get() as u32, + false, + true, + RewardDestination::Staked, + )?; + let slash_fraction = Perbill::from_percent(10); + let page_index = 0; + let slashed_balance = BalanceOf::::from(10u32); + + let slash_key = (validator.clone(), slash_fraction, page_index); + let slashed_nominators = + nominators.iter().map(|(n, _)| (n.clone(), slashed_balance)).collect::>(); + + let unapplied_slash = UnappliedSlash:: { + validator: validator.clone(), + own: slashed_balance, + others: WeakBoundedVec::force_from(slashed_nominators, None), + 
reporter: Default::default(), + payout: Zero::zero(), + }; + + // Insert an unapplied slash to be processed. + UnappliedSlashes::::insert(era, slash_key.clone(), unapplied_slash); + + #[extrinsic_call] + _(RawOrigin::Signed(validator.clone()), era, slash_key.clone()); + + // Ensure the slash has been applied and removed. + assert!(UnappliedSlashes::::get(era, &slash_key).is_none()); + + Ok(()) + } + + #[benchmark] + fn process_offence_queue() -> Result<(), BenchmarkError> { + // in tests, it is likely that `SlashDeferDuration` is zero and this will also insta-apply + // the slash. Remove this just in case. + #[cfg(test)] + crate::mock::SlashDeferDuration::set(77); + + // create at least one validator with a full page of exposure, as per `MaxExposurePageSize`. + let all_validators = crate::testing_utils::create_validators_with_nominators_for_era::( + // we create more validators, but all of the nominators will back the first one + ValidatorCount::::get(), + // create two full exposure pages + 2 * T::MaxExposurePageSize::get(), + 16, + false, + Some(1), + )?; + let offender = + T::Lookup::lookup(all_validators.first().cloned().expect("must exist")).unwrap(); + + // plan an era with this set + let _new_validators = Rotator::::legacy_insta_plan_era(); + // activate the previous one + Rotator::::start_era( + crate::ActiveEraInfo { index: Rotator::::planning_era() - 1, start: Some(1) }, + 42, // start session index doesn't really matter, + 2, // timestamp doesn't really matter + ); + + // ensure our offender has at least a full exposure page + let offender_exposure = + Eras::::get_full_exposure(Rotator::::planning_era(), &offender); + ensure!( + offender_exposure.others.len() as u32 == 2 * T::MaxExposurePageSize::get(), + "exposure not created" + ); + + // create an offence for this validator + let slash_session = 42; + let offences = vec![rc_client::Offence { + offender: offender.clone(), + reporters: Default::default(), + slash_fraction: Perbill::from_percent(50), + 
}]; + as rc_client::AHStakingInterface>::on_new_offences( + slash_session, + offences, + ); + + // ensure offence is submitted + ensure!( + ValidatorSlashInEra::::contains_key(Rotator::::active_era(), offender), + "offence not submitted" + ); + ensure!( + OffenceQueueEras::::get().unwrap_or_default() == vec![Rotator::::active_era()], + "offence should be queued" + ); + + #[block] + { + slashing::process_offence::(); + } + + ensure!(OffenceQueueEras::::get().is_none(), "offence should not be queued"); + + Ok(()) + } + + #[benchmark] + fn rc_on_offence( + v: Linear<2, { T::MaxValidatorSet::get() / 2 }>, + ) -> Result<(), BenchmarkError> { + let initial_era = Rotator::::planning_era(); + let _ = crate::testing_utils::create_validators_with_nominators_for_era::( + 2 * v, + // number of nominators is irrelevant here, so we hardcode these + 1000, + 16, + false, + None, + )?; + + // plan new era + let new_validators = Rotator::::legacy_insta_plan_era(); + ensure!(Rotator::::planning_era() == initial_era + 1, "era should be incremented"); + // activate the previous one + Rotator::::start_era( + crate::ActiveEraInfo { index: initial_era, start: Some(1) }, + 42, // start session index doesn't really matter, + 2, // timestamp doesn't really matter + ); + + // this is needed in the slashing code, and is a sign that `initial_era + 1` is planned! 
+ ensure!( + ErasStartSessionIndex::::get(initial_era + 1).unwrap() == 42, + "EraStartSessionIndex not set" + ); + + // slash the first half of the validators + let to_slash_count = new_validators.len() / 2; + let to_slash = new_validators.into_iter().take(to_slash_count).collect::>(); + let one_slashed = to_slash.first().cloned().unwrap(); + let offences = to_slash + .into_iter() + .map(|offender| rc_client::Offence { + offender, + reporters: Default::default(), + slash_fraction: Perbill::from_percent(50), + }) + .collect::>(); + let slash_session = 42; + + // has not pending slash for these guys now + ensure!( + !ValidatorSlashInEra::::contains_key(initial_era + 1, &one_slashed), + "offence submitted???" + ); + + #[block] + { + as rc_client::AHStakingInterface>::on_new_offences( + slash_session, + offences, + ); + } + + // ensure offence is recorded + ensure!( + ValidatorSlashInEra::::contains_key(initial_era + 1, one_slashed), + "offence not submitted" + ); + + Ok(()) + } + + #[benchmark] + fn rc_on_session_report() -> Result<(), BenchmarkError> { + let initial_planned_era = Rotator::::planning_era(); + let initial_active_era = Rotator::::active_era(); + + // create a small, arbitrary number of stakers. This is just for sanity of the era planning, + // numbers don't matter. + crate::testing_utils::create_validators_with_nominators_for_era::( + 10, 50, 2, false, None, + )?; + + // plan new era + let _new_validators = Rotator::::legacy_insta_plan_era(); + ensure!( + CurrentEra::::get().unwrap() == initial_planned_era + 1, + "era should be incremented" + ); + + // receive a session report with timestamp that actives the previous one. 
+ let validator_points = (0..T::MaxValidatorSet::get()) + .map(|v| (account::("random", v, SEED), v)) + .collect::>(); + let activation_timestamp = Some((1u64, initial_planned_era + 1)); + let report = rc_client::SessionReport { + end_index: 42, + leftover: false, + validator_points, + activation_timestamp, + }; + + #[block] + { + as rc_client::AHStakingInterface>::on_relay_session_report(report); + } + + ensure!(Rotator::::active_era() == initial_active_era + 1, "active era not bumped"); + Ok(()) + } + + impl_benchmark_test_suite!( + Staking, + crate::mock::ExtBuilder::default().has_stakers(true), + crate::mock::Test, + exec_name = build_and_execute + ); +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::mock::{ExtBuilder, RuntimeOrigin, Staking, Test}; + use frame_support::assert_ok; + + #[test] + fn create_validators_with_nominators_for_era_works() { + ExtBuilder::default().build_and_execute(|| { + let v = 10; + let n = 100; + + create_validators_with_nominators_for_era::( + v, + n, + MaxNominationsOf::::get() as usize, + false, + None, + ) + .unwrap(); + + let count_validators = Validators::::iter().count(); + let count_nominators = Nominators::::iter().count(); + + assert_eq!(count_validators, Validators::::count() as usize); + assert_eq!(count_nominators, Nominators::::count() as usize); + + assert_eq!(count_validators, v as usize); + assert_eq!(count_nominators, n as usize); + }); + } + + #[test] + fn create_validator_with_nominators_works() { + ExtBuilder::default().build_and_execute(|| { + let n = 10; + + let (validator_stash, nominators, current_era) = + create_validator_with_nominators::( + n, + <::MaxExposurePageSize as Get<_>>::get(), + false, + false, + RewardDestination::Staked, + ) + .unwrap(); + + assert_eq!(nominators.len() as u32, n); + + let original_stakeable_balance = asset::stakeable_balance::(&validator_stash); + assert_ok!(Staking::payout_stakers_by_page( + RuntimeOrigin::signed(1337), + validator_stash, + current_era, + 0 + )); + 
let new_stakeable_balance = asset::stakeable_balance::(&validator_stash); + + // reward increases stakeable balance + assert!(original_stakeable_balance < new_stakeable_balance); + }); + } + + #[test] + fn add_slashing_spans_works() { + ExtBuilder::default().build_and_execute(|| { + let n = 10; + + let (validator_stash, _nominators, _) = create_validator_with_nominators::( + n, + <::MaxExposurePageSize as Get<_>>::get(), + false, + false, + RewardDestination::Staked, + ) + .unwrap(); + + // Add 20 slashing spans + let num_of_slashing_spans = 20; + add_slashing_spans::(&validator_stash, num_of_slashing_spans); + + let slashing_spans = SlashingSpans::::get(&validator_stash).unwrap(); + assert_eq!(slashing_spans.iter().count(), num_of_slashing_spans as usize); + for i in 0..num_of_slashing_spans { + assert!(SpanSlash::::contains_key((&validator_stash, i))); + } + + // Test everything is cleaned up + assert_ok!(Staking::kill_stash(&validator_stash, num_of_slashing_spans)); + assert!(SlashingSpans::::get(&validator_stash).is_none()); + for i in 0..num_of_slashing_spans { + assert!(!SpanSlash::::contains_key((&validator_stash, i))); + } + }); + } +} diff --git a/substrate/frame/staking-async/src/election_size_tracker.rs b/substrate/frame/staking-async/src/election_size_tracker.rs new file mode 100644 index 0000000000000..6287339ef48a2 --- /dev/null +++ b/substrate/frame/staking-async/src/election_size_tracker.rs @@ -0,0 +1,259 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! ## A static size tracker for the election snapshot data.
+//!
+//! ### Overview
+//!
+//! The goal of the size tracker is to provide a static, no-allocation byte tracker to be
+//! used by the election data provider when preparing the results of
+//! [`ElectionDataProvider::electing_voters`]. The [`StaticTracker`] implementation uses
+//! [`codec::Encode::size_hint`] to estimate the SCALE encoded size of the snapshot voters struct
+//! as it is being constructed without requiring extra stack allocations.
+//!
+//! The [`StaticTracker::try_register_voter`] is called to update the static tracker internal
+//! state. It will return an error if the resulting SCALE encoded size (in bytes) is larger than
+//! the provided `DataProviderBounds`.
+//!
+//! ### Example
+//!
+//! ```ignore
+//! use pallet_staking_async::election_size_tracker::*;
+//!
+//! // instantiates a new tracker.
+//! let mut size_tracker = StaticTracker::::default();
+//!
+//! let voter_bounds = ElectionBoundsBuilder::default().voter_size(1_00.into()).build().voters;
+//!
+//! let mut sorted_voters = T::VoterList.iter();
+//! let mut selected_voters = vec![];
+//!
+//! // fit as many voters in the vec as the bounds permit.
+//! for v in sorted_voters {
+//! let voter = (v, weight_of(&v), targets_of(&v));
+//! if size_tracker.try_register_voter(&voter, &voter_bounds).is_err() {
+//! // voter bounds size exhausted
+//! break;
+//! }
+//! selected_voters.push(voter);
+//! }
+//!
+//! // The SCALE encoded size in bytes of `selected_voters` is guaranteed to be below
+//! 
// `voter_bounds`. +//! debug_assert!( +//! selected_voters.encoded_size() <= +//! SizeTracker::::final_byte_size_of(size_tracker.num_voters, size_tracker.size) +//! ); +//! ``` +//! +//! ### Implementation Details +//! +//! The current implementation of the static tracker is tightly coupled with the staking pallet +//! implementation, namely the representation of a voter ([`VoterOf`]). The SCALE encoded byte size +//! is calculated using [`Encode::size_hint`] of each type in the voter tuple. Each voter's byte +//! size is the sum of: +//! - 1 * [`Encode::size_hint`] of the `AccountId` type; +//! - 1 * [`Encode::size_hint`] of the `VoteWeight` type; +//! - `num_votes` * [`Encode::size_hint`] of the `AccountId` type. + +use codec::Encode; +use frame_election_provider_support::{ + bounds::{DataProviderBounds, SizeBound}, + ElectionDataProvider, VoterOf, +}; + +/// Keeps track of the SCALE encoded byte length of the snapshot's voters or targets. +/// +/// The tracker calculates the bytes used based on static rules, without requiring any actual +/// encoding or extra allocations. +#[derive(Clone, Copy, Debug)] +pub struct StaticTracker { + pub size: usize, + pub counter: usize, + _marker: core::marker::PhantomData, +} + +impl Default for StaticTracker { + fn default() -> Self { + Self { size: 0, counter: 0, _marker: Default::default() } + } +} + +impl StaticTracker +where + DataProvider: ElectionDataProvider, +{ + /// Tries to register a new voter. + /// + /// If the new voter exhausts the provided bounds, return an error. Otherwise, the internal + /// state of the tracker is updated with the new registered voter. 
+ pub fn try_register_voter( + &mut self, + voter: &VoterOf, + bounds: &DataProviderBounds, + ) -> Result<(), ()> { + let tracker_size_after = { + let voter_hint = Self::voter_size_hint(voter); + Self::final_byte_size_of(self.counter + 1, self.size.saturating_add(voter_hint)) + }; + + match bounds.size_exhausted(SizeBound(tracker_size_after as u32)) { + true => Err(()), + false => { + self.size = tracker_size_after; + self.counter += 1; + Ok(()) + }, + } + } + + /// Calculates the size of the voter to register based on [`Encode::size_hint`]. + fn voter_size_hint(voter: &VoterOf) -> usize { + let (voter_account, vote_weight, targets) = voter; + + voter_account + .size_hint() + .saturating_add(vote_weight.size_hint()) + .saturating_add(voter_account.size_hint().saturating_mul(targets.len())) + } + + /// Tries to register a new target. + /// + /// If the new target exhausts the provided bounds, return an error. Otherwise, the internal + /// state of the tracker is updated with the new registered target. + pub fn try_register_target( + &mut self, + target: DataProvider::AccountId, + bounds: &DataProviderBounds, + ) -> Result<(), ()> { + let tracker_size_after = Self::final_byte_size_of( + self.counter + 1, + self.size.saturating_add(target.size_hint()), + ); + + match bounds.size_exhausted(SizeBound(tracker_size_after as u32)) { + true => Err(()), + false => { + self.size = tracker_size_after; + self.counter += 1; + Ok(()) + }, + } + } + + /// Size of the SCALE encoded prefix with a given length. + #[inline] + fn length_prefix(len: usize) -> usize { + use codec::{Compact, CompactLen}; + Compact::::compact_len(&(len as u32)) + } + + /// Calculates the final size in bytes of the SCALE encoded snapshot voter struct. 
+ fn final_byte_size_of(num_voters: usize, size: usize) -> usize { + Self::length_prefix(num_voters).saturating_add(size) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + mock::{AccountId, Staking, Test}, + BoundedVec, MaxNominationsOf, + }; + use frame_election_provider_support::bounds::ElectionBoundsBuilder; + use sp_core::bounded_vec; + + type Voters = BoundedVec>; + + #[test] + pub fn election_size_tracker_works() { + let mut voters: Vec<(u64, u64, Voters)> = vec![]; + let mut size_tracker = StaticTracker::::default(); + let voter_bounds = ElectionBoundsBuilder::default().voters_size(1_50.into()).build().voters; + + // register 1 voter with 1 vote. + let voter = (1, 10, bounded_vec![2]); + assert!(size_tracker.try_register_voter(&voter, &voter_bounds).is_ok()); + voters.push(voter); + + assert_eq!( + StaticTracker::::final_byte_size_of(size_tracker.counter, size_tracker.size), + voters.encoded_size() + ); + + // register another voter, now with 3 votes. + let voter = (2, 20, bounded_vec![3, 4, 5]); + assert!(size_tracker.try_register_voter(&voter, &voter_bounds).is_ok()); + voters.push(voter); + + assert_eq!( + StaticTracker::::final_byte_size_of(size_tracker.counter, size_tracker.size), + voters.encoded_size() + ); + + // register noop vote (unlikely to happen). 
+ let voter = (3, 30, bounded_vec![]); + assert!(size_tracker.try_register_voter(&voter, &voter_bounds).is_ok()); + voters.push(voter); + + assert_eq!( + StaticTracker::::final_byte_size_of(size_tracker.counter, size_tracker.size), + voters.encoded_size() + ); + } + + #[test] + pub fn election_size_tracker_bounds_works() { + let mut voters: Vec<(u64, u64, Voters)> = vec![]; + let mut size_tracker = StaticTracker::::default(); + let voter_bounds = ElectionBoundsBuilder::default().voters_size(1_00.into()).build().voters; + + let voter = (1, 10, bounded_vec![2]); + assert!(size_tracker.try_register_voter(&voter, &voter_bounds).is_ok()); + voters.push(voter); + + assert_eq!( + StaticTracker::::final_byte_size_of(size_tracker.counter, size_tracker.size), + voters.encoded_size() + ); + + assert!(size_tracker.size > 0 && size_tracker.size < 1_00); + let size_before_overflow = size_tracker.size; + + // try many voters that will overflow the tracker's buffer. + let voter = (2, 10, bounded_vec![2, 3, 4, 5, 6, 7, 8, 9]); + voters.push(voter.clone()); + + assert!(size_tracker.try_register_voter(&voter, &voter_bounds).is_err()); + assert!(size_tracker.size > 0 && size_tracker.size < 1_00); + + // size of the tracker did not update when trying to register votes failed. + assert_eq!(size_tracker.size, size_before_overflow); + } + + #[test] + fn len_prefix_works() { + let length_samples = + vec![0usize, 1, 62, 63, 64, 16383, 16384, 16385, 1073741822, 1073741823, 1073741824]; + + for s in length_samples { + // the encoded size of a vector of n bytes should be n + the length prefix + assert_eq!(vec![1u8; s].encoded_size(), StaticTracker::::length_prefix(s) + s); + } + } +} diff --git a/substrate/frame/staking-async/src/ledger.rs b/substrate/frame/staking-async/src/ledger.rs new file mode 100644 index 0000000000000..5d5265816f1bf --- /dev/null +++ b/substrate/frame/staking-async/src/ledger.rs @@ -0,0 +1,582 @@ +// This file is part of Substrate. 
+ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! A Ledger implementation for stakers. +//! +//! A [`StakingLedger`] encapsulates all the state and logic related to the stake of bonded +//! stakers, namely, it handles the following storage items: +//! * [`Bonded`]: mutates and reads the state of the controller <> stash bond map (to be deprecated +//! soon); +//! * [`Ledger`]: mutates and reads the state of all the stakers. The [`Ledger`] storage item stores +//! instances of [`StakingLedger`] keyed by the staker's controller account and should be mutated +//! and read through the [`StakingLedger`] API; +//! * [`Payee`]: mutates and reads the reward destination preferences for a bonded stash. +//! * Staking locks: mutates the locks for staking. +//! +//! NOTE: All the storage operations related to the staking ledger (both reads and writes) *MUST* be +//! performed through the methods exposed by the [`StakingLedger`] implementation in order to ensure +//! state consistency. 
+ +use crate::{ + asset, log, BalanceOf, Bonded, Config, DecodeWithMemTracking, Error, Ledger, Pallet, Payee, + RewardDestination, Vec, VirtualStakers, +}; +use alloc::collections::BTreeMap; +use codec::{Decode, Encode, HasCompact, MaxEncodedLen}; +use frame_support::{ + defensive, ensure, + traits::{Defensive, DefensiveSaturating, Get}, + BoundedVec, CloneNoBound, DebugNoBound, EqNoBound, PartialEqNoBound, +}; +use scale_info::TypeInfo; +use sp_runtime::{traits::Zero, DispatchResult, Perquintill, Rounding, Saturating}; +use sp_staking::{EraIndex, OnStakingUpdate, StakingAccount, StakingInterface}; + +/// Just a Balance/BlockNumber tuple to encode when a chunk of funds will be unlocked. +#[derive( + PartialEq, Eq, Clone, Encode, Decode, DecodeWithMemTracking, Debug, TypeInfo, MaxEncodedLen, +)] +pub struct UnlockChunk { + /// Amount of funds to be unlocked. + #[codec(compact)] + pub(crate) value: Balance, + /// Era number at which point it'll be unlocked. + #[codec(compact)] + pub(crate) era: EraIndex, +} + +/// The ledger of a (bonded) stash. +/// +/// Note: All the reads and mutations to the [`Ledger`], [`Bonded`] and [`Payee`] storage items +/// *MUST* be performed through the methods exposed by this struct, to ensure the consistency of +/// ledger's data and corresponding staking lock +/// +/// TODO: move struct definition and full implementation into `/src/ledger.rs`. Currently +/// leaving here to enforce a clean PR diff, given how critical this logic is. Tracking issue +/// . +#[derive( + PartialEqNoBound, EqNoBound, CloneNoBound, Encode, Decode, DebugNoBound, TypeInfo, MaxEncodedLen, +)] +#[scale_info(skip_type_params(T))] +pub struct StakingLedger { + /// The stash account whose balance is actually locked and at stake. + pub stash: T::AccountId, + + /// The total amount of the stash's balance that we are currently accounting for. + /// It's just `active` plus all the `unlocking` balances. 
+ #[codec(compact)] + pub total: BalanceOf, + + /// The total amount of the stash's balance that will be at stake in any forthcoming + /// rounds. + #[codec(compact)] + pub active: BalanceOf, + + /// Any balance that is becoming free, which may eventually be transferred out of the stash + /// (assuming it doesn't get slashed first). It is assumed that this will be treated as a first + /// in, first out queue where the new (higher value) eras get pushed on the back. + pub unlocking: BoundedVec>, T::MaxUnlockingChunks>, + + /// The controller associated with this ledger's stash. + /// + /// This is not stored on-chain, and is only bundled when the ledger is read from storage. + /// Use [`controller`] function to get the controller associated with the ledger. + #[codec(skip)] + pub(crate) controller: Option, +} + +impl StakingLedger { + #[cfg(any(feature = "runtime-benchmarks", test))] + pub fn default_from(stash: T::AccountId) -> Self { + Self { + stash: stash.clone(), + total: Zero::zero(), + active: Zero::zero(), + unlocking: Default::default(), + controller: Some(stash), + } + } + + /// Returns a new instance of a staking ledger. + /// + /// The [`Ledger`] storage is not mutated. In order to store, `StakingLedger::update` must be + /// called on the returned staking ledger. + /// + /// Note: as the controller accounts are being deprecated, the stash account is the same as the + /// controller account. + pub fn new(stash: T::AccountId, stake: BalanceOf) -> Self { + Self { + stash: stash.clone(), + active: stake, + total: stake, + unlocking: Default::default(), + // controllers are deprecated and mapped 1-1 to stashes. + controller: Some(stash), + } + } + + /// Returns the paired account, if any. + /// + /// A "pair" refers to the tuple (stash, controller). If the input is a + /// [`StakingAccount::Stash`] variant, its pair account will be of type + /// [`StakingAccount::Controller`] and vice-versa. 
+ /// + /// This method is meant to abstract from the runtime development the difference between stash + /// and controller. This will be deprecated once the controller is fully deprecated as well. + pub(crate) fn paired_account(account: StakingAccount) -> Option { + match account { + StakingAccount::Stash(stash) => >::get(stash), + StakingAccount::Controller(controller) => + >::get(&controller).map(|ledger| ledger.stash), + } + } + + /// Returns whether a given account is bonded. + pub(crate) fn is_bonded(account: StakingAccount) -> bool { + match account { + StakingAccount::Stash(stash) => >::contains_key(stash), + StakingAccount::Controller(controller) => >::contains_key(controller), + } + } + + /// Returns a staking ledger, if it is bonded and it exists in storage. + /// + /// This getter can be called with either a controller or stash account, provided that the + /// account is properly wrapped in the respective [`StakingAccount`] variant. This is meant to + /// abstract the concept of controller/stash accounts from the caller. + /// + /// Returns [`Error::BadState`] when a bond is in "bad state". A bond is in a bad state when a + /// stash has a controller which is bonding a ledger associated with another stash. + pub(crate) fn get(account: StakingAccount) -> Result, Error> { + let (stash, controller) = match account { + StakingAccount::Stash(stash) => + (stash.clone(), >::get(&stash).ok_or(Error::::NotStash)?), + StakingAccount::Controller(controller) => ( + Ledger::::get(&controller) + .map(|l| l.stash) + .ok_or(Error::::NotController)?, + controller, + ), + }; + + let ledger = >::get(&controller) + .map(|mut ledger| { + ledger.controller = Some(controller.clone()); + ledger + }) + .ok_or(Error::::NotController)?; + + // if ledger bond is in a bad state, return error to prevent applying operations that may + // further spoil the ledger's state. A bond is in bad state when the bonded controller is + // associated with a different ledger (i.e. 
a ledger with a different stash). + // + // See for more details. + ensure!( + Bonded::::get(&stash) == Some(controller) && ledger.stash == stash, + Error::::BadState + ); + + Ok(ledger) + } + + /// Returns the reward destination of a staking ledger, stored in [`Payee`]. + /// + /// Note: if the stash is not bonded and/or does not have an entry in [`Payee`], it returns the + /// default reward destination. + pub(crate) fn reward_destination( + account: StakingAccount, + ) -> Option> { + let stash = match account { + StakingAccount::Stash(stash) => Some(stash), + StakingAccount::Controller(controller) => + Self::paired_account(StakingAccount::Controller(controller)), + }; + + if let Some(stash) = stash { + >::get(stash) + } else { + defensive!("fetched reward destination from unbonded stash {}", stash); + None + } + } + + /// Returns the controller account of a staking ledger. + /// + /// Note: it will fallback into querying the [`Bonded`] storage with the ledger stash if the + /// controller is not set in `self`, which most likely means that self was fetched directly from + /// [`Ledger`] instead of through the methods exposed in [`StakingLedger`]. If the ledger does + /// not exist in storage, it returns `None`. + pub(crate) fn controller(&self) -> Option { + self.controller.clone().or_else(|| { + defensive!("fetched a controller on a ledger instance without it."); + Self::paired_account(StakingAccount::Stash(self.stash.clone())) + }) + } + + /// Inserts/updates a staking ledger account. + /// + /// Bonds the ledger if it is not bonded yet, signalling that this is a new ledger. The staking + /// lock/hold of the stash account are updated accordingly. + /// + /// Note: To ensure lock consistency, all the [`Ledger`] storage updates should be made through + /// this helper function. + pub(crate) fn update(self) -> Result<(), Error> { + if !>::contains_key(&self.stash) { + return Err(Error::::NotStash) + } + + // We skip locking virtual stakers. 
+ if !Pallet::::is_virtual_staker(&self.stash) { + // for direct stakers, update lock on stash based on ledger. + asset::update_stake::(&self.stash, self.total) + .map_err(|_| Error::::NotEnoughFunds)?; + } + + Ledger::::insert( + &self.controller().ok_or_else(|| { + defensive!("update called on a ledger that is not bonded."); + Error::::NotController + })?, + &self, + ); + + Ok(()) + } + + /// Bonds a ledger. + /// + /// It sets the reward preferences for the bonded stash. + pub(crate) fn bond(self, payee: RewardDestination) -> Result<(), Error> { + if >::contains_key(&self.stash) { + return Err(Error::::AlreadyBonded) + } + + >::insert(&self.stash, payee); + >::insert(&self.stash, &self.stash); + self.update() + } + + /// Sets the ledger Payee. + pub(crate) fn set_payee(self, payee: RewardDestination) -> Result<(), Error> { + if !>::contains_key(&self.stash) { + return Err(Error::::NotStash) + } + + >::insert(&self.stash, payee); + Ok(()) + } + + /// Sets the ledger controller to its stash. + pub(crate) fn set_controller_to_stash(self) -> Result<(), Error> { + let controller = self.controller.as_ref() + .defensive_proof("Ledger's controller field didn't exist. The controller should have been fetched using StakingLedger.") + .ok_or(Error::::NotController)?; + + ensure!(self.stash != *controller, Error::::AlreadyPaired); + + // check if the ledger's stash is a controller of another ledger. + if let Some(bonded_ledger) = Ledger::::get(&self.stash) { + // there is a ledger bonded by the stash. In this case, the stash of the bonded ledger + // should be the same as the ledger's stash. Otherwise fail to prevent data + // inconsistencies. See for more + // details. 
+ ensure!(bonded_ledger.stash == self.stash, Error::::BadState); + } + + >::remove(&controller); + >::insert(&self.stash, &self); + >::insert(&self.stash, &self.stash); + + Ok(()) + } + + /// Clears all data related to a staking ledger and its bond in both [`Ledger`] and [`Bonded`] + /// storage items and updates the stash staking lock. + pub(crate) fn kill(stash: &T::AccountId) -> DispatchResult { + let controller = >::get(stash).ok_or(Error::::NotStash)?; + + >::get(&controller).ok_or(Error::::NotController).map(|ledger| { + Ledger::::remove(controller); + >::remove(&stash); + >::remove(&stash); + + // kill virtual staker if it exists. + if >::take(&ledger.stash).is_none() { + // if not virtual staker, clear locks. + asset::kill_stake::(&ledger.stash)?; + } + Pallet::::deposit_event(crate::Event::::StakerRemoved { + stash: ledger.stash.clone(), + }); + Ok(()) + })? + } + + #[cfg(test)] + pub(crate) fn assert_stash_killed(stash: T::AccountId) { + assert!(!Ledger::::contains_key(&stash)); + assert!(!Bonded::::contains_key(&stash)); + assert!(!Payee::::contains_key(&stash)); + assert!(!VirtualStakers::::contains_key(&stash)); + } + + /// Remove entries from `unlocking` that are sufficiently old and reduce the + /// total by the sum of their balances. + pub(crate) fn consolidate_unlocked(self, current_era: EraIndex) -> Self { + let mut total = self.total; + let unlocking: BoundedVec<_, _> = self + .unlocking + .into_iter() + .filter(|chunk| { + if chunk.era > current_era { + true + } else { + total = total.saturating_sub(chunk.value); + false + } + }) + .collect::>() + .try_into() + .expect( + "filtering items from a bounded vec always leaves length less than bounds. qed", + ); + + Self { + stash: self.stash, + total, + active: self.active, + unlocking, + controller: self.controller, + } + } + + /// Re-bond funds that were scheduled for unlocking. + /// + /// Returns the updated ledger, and the amount actually rebonded. 
+ pub(crate) fn rebond(mut self, value: BalanceOf) -> (Self, BalanceOf) { + let mut unlocking_balance = BalanceOf::::zero(); + + while let Some(last) = self.unlocking.last_mut() { + if unlocking_balance.defensive_saturating_add(last.value) <= value { + unlocking_balance += last.value; + self.active += last.value; + self.unlocking.pop(); + } else { + let diff = value.defensive_saturating_sub(unlocking_balance); + + unlocking_balance += diff; + self.active += diff; + last.value -= diff; + } + + if unlocking_balance >= value { + break + } + } + + (self, unlocking_balance) + } + + /// Slash the staker for a given amount of balance. + /// + /// This implements a proportional slashing system, whereby we set our preference to slash as + /// such: + /// + /// - If any unlocking chunks exist that are scheduled to be unlocked at `slash_era + + /// bonding_duration` and onwards, the slash is divided equally between the active ledger and + /// the unlocking chunks. + /// - If no such chunks exist, then only the active balance is slashed. + /// + /// Note that the above is only a *preference*. If for any reason the active ledger, with or + /// without some portion of the unlocking chunks that are more justified to be slashed are not + /// enough, then the slashing will continue and will consume as much of the active and unlocking + /// chunks as needed. + /// + /// This will never slash more than the given amount. If any of the chunks become dusted, the + /// last chunk is slashed slightly less to compensate. Returns the amount of funds actually + /// slashed. + /// + /// `slash_era` is the era in which the slash (which is being enacted now) actually happened. + /// + /// This calls `Config::OnStakingUpdate::on_slash` with information as to how the slash was + /// applied. 
+ pub fn slash( + &mut self, + slash_amount: BalanceOf, + minimum_balance: BalanceOf, + slash_era: EraIndex, + ) -> BalanceOf { + if slash_amount.is_zero() { + return Zero::zero() + } + + use sp_runtime::PerThing as _; + let mut remaining_slash = slash_amount; + let pre_slash_total = self.total; + + // for a `slash_era = x`, any chunk that is scheduled to be unlocked at era `x + 28` + // (assuming 28 is the bonding duration) onwards should be slashed. + let slashable_chunks_start = slash_era.saturating_add(T::BondingDuration::get()); + + // `Some(ratio)` if this is proportional, with `ratio`, `None` otherwise. In both cases, we + // slash first the active chunk, and then `slash_chunks_priority`. + let (maybe_proportional, slash_chunks_priority) = { + if let Some(first_slashable_index) = + self.unlocking.iter().position(|c| c.era >= slashable_chunks_start) + { + // If there exists a chunk who's after the first_slashable_start, then this is a + // proportional slash, because we want to slash active and these chunks + // proportionally. + + // The indices of the first chunk after the slash up through the most recent chunk. + // (The most recent chunk is at greatest from this era) + let affected_indices = first_slashable_index..self.unlocking.len(); + let unbonding_affected_balance = + affected_indices.clone().fold(BalanceOf::::zero(), |sum, i| { + if let Some(chunk) = self.unlocking.get(i).defensive() { + sum.saturating_add(chunk.value) + } else { + sum + } + }); + let affected_balance = self.active.saturating_add(unbonding_affected_balance); + let ratio = Perquintill::from_rational_with_rounding( + slash_amount, + affected_balance, + Rounding::Up, + ) + .unwrap_or_else(|_| Perquintill::one()); + ( + Some(ratio), + affected_indices.chain((0..first_slashable_index).rev()).collect::>(), + ) + } else { + // We just slash from the last chunk to the most recent one, if need be. 
+ (None, (0..self.unlocking.len()).rev().collect::>()) + } + }; + + // Helper to update `target` and the ledgers total after accounting for slashing `target`. + log!( + trace, + "slashing {:?} for era {:?} out of {:?}, priority: {:?}, proportional = {:?}", + slash_amount, + slash_era, + self, + slash_chunks_priority, + maybe_proportional, + ); + + let mut slash_out_of = |target: &mut BalanceOf, slash_remaining: &mut BalanceOf| { + let mut slash_from_target = if let Some(ratio) = maybe_proportional { + ratio.mul_ceil(*target) + } else { + *slash_remaining + } + // this is the total that that the slash target has. We can't slash more than + // this anyhow! + .min(*target) + // this is the total amount that we would have wanted to slash + // non-proportionally, a proportional slash should never exceed this either! + .min(*slash_remaining); + + // slash out from *target exactly `slash_from_target`. + *target = *target - slash_from_target; + if *target < minimum_balance { + // Slash the rest of the target if it's dust. This might cause the last chunk to be + // slightly under-slashed, by at most `MaxUnlockingChunks * ED`, which is not a big + // deal. + slash_from_target = + core::mem::replace(target, Zero::zero()).saturating_add(slash_from_target) + } + + self.total = self.total.saturating_sub(slash_from_target); + *slash_remaining = slash_remaining.saturating_sub(slash_from_target); + }; + + // If this is *not* a proportional slash, the active will always wiped to 0. + slash_out_of(&mut self.active, &mut remaining_slash); + + let mut slashed_unlocking = BTreeMap::<_, _>::new(); + for i in slash_chunks_priority { + if remaining_slash.is_zero() { + break + } + + if let Some(chunk) = self.unlocking.get_mut(i).defensive() { + slash_out_of(&mut chunk.value, &mut remaining_slash); + // write the new slashed value of this chunk to the map. + slashed_unlocking.insert(chunk.era, chunk.value); + } else { + break + } + } + + // clean unlocking chunks that are set to zero. 
+ self.unlocking.retain(|c| !c.value.is_zero()); + + let final_slashed_amount = pre_slash_total.saturating_sub(self.total); + T::EventListeners::on_slash( + &self.stash, + self.active, + &slashed_unlocking, + final_slashed_amount, + ); + final_slashed_amount + } +} + +/// State of a ledger with regards with its data and metadata integrity. +#[derive(PartialEq, Debug)] +pub(crate) enum LedgerIntegrityState { + /// Ledger, bond and corresponding staking lock is OK. + Ok, + /// Ledger and/or bond is corrupted. This means that the bond has a ledger with a different + /// stash than the bonded stash. + Corrupted, + /// Ledger was corrupted and it has been killed. + CorruptedKilled, + /// Ledger and bond are OK, however the ledger's stash lock is out of sync. + LockCorrupted, +} + +// This structs makes it easy to write tests to compare staking ledgers fetched from storage. This +// is required because the controller field is not stored in storage and it is private. +#[cfg(test)] +#[derive(frame_support::DebugNoBound, Clone, Encode, Decode, TypeInfo, MaxEncodedLen)] +pub struct StakingLedgerInspect { + pub stash: T::AccountId, + #[codec(compact)] + pub total: BalanceOf, + #[codec(compact)] + pub active: BalanceOf, + pub unlocking: + frame_support::BoundedVec>, T::MaxUnlockingChunks>, +} + +#[cfg(test)] +impl PartialEq> for StakingLedger { + fn eq(&self, other: &StakingLedgerInspect) -> bool { + self.stash == other.stash && + self.total == other.total && + self.active == other.active && + self.unlocking == other.unlocking + } +} + +#[cfg(test)] +impl codec::EncodeLike> for StakingLedgerInspect {} diff --git a/substrate/frame/staking-async/src/lib.rs b/substrate/frame/staking-async/src/lib.rs new file mode 100644 index 0000000000000..76c7b15619b8e --- /dev/null +++ b/substrate/frame/staking-async/src/lib.rs @@ -0,0 +1,425 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Staking Async Pallet +//! +//! This pallet is a fork of the original `pallet-staking`, with a number of key differences: +//! +//! * It no longer has access to a secure timestamp, previously used to calculate the duration of an +//! era. +//! * It no longer has access to a pallet-session. +//! * It no longer has access to a pallet-authorship. +//! * It is capable of working with a multi-page `ElectionProvider``, aka. +//! `pallet-election-provider-multi-block`. +//! +//! While `pallet-staking` was somewhat general-purpose, this pallet is absolutely NOT right from +//! the get-go: It is designed to be used ONLY in Polkadot/Kusama AssetHub system parachains. +//! +//! The workings of this pallet can be divided into a number of subsystems, as follows. +//! +//! ## User Interactions +//! +//! TODO +//! +//! ## Session and Era Rotation +//! +//! TODO +//! +//! ## Exposure Collection +//! +//! TODO +//! +//! ## Slashing of Validators and Exposures +//! +//! 
TODO + +#![cfg_attr(not(feature = "std"), no_std)] +#![recursion_limit = "256"] + +#[cfg(feature = "runtime-benchmarks")] +pub mod benchmarking; +#[cfg(any(feature = "runtime-benchmarks", test))] +pub mod testing_utils; + +#[cfg(test)] +pub(crate) mod mock; +#[cfg(test)] +mod tests; + +pub mod asset; +pub mod election_size_tracker; +pub mod ledger; +mod pallet; +pub mod session_rotation; +pub mod slashing; +pub mod weights; + +extern crate alloc; +use alloc::{collections::btree_map::BTreeMap, vec, vec::Vec}; +use codec::{Decode, DecodeWithMemTracking, Encode, HasCompact, MaxEncodedLen}; +use frame_election_provider_support::ElectionProvider; +use frame_support::{ + traits::{ + tokens::fungible::{Credit, Debt}, + ConstU32, Contains, Get, LockIdentifier, + }, + BoundedVec, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, WeakBoundedVec, +}; +use ledger::LedgerIntegrityState; +use scale_info::TypeInfo; +use sp_runtime::{ + traits::{AtLeast32BitUnsigned, StaticLookup}, + Perbill, RuntimeDebug, +}; +use sp_staking::{EraIndex, ExposurePage, PagedExposureMetadata}; +pub use sp_staking::{Exposure, IndividualExposure, StakerStatus}; +pub use weights::WeightInfo; + +// public exports +pub use ledger::{StakingLedger, UnlockChunk}; +pub use pallet::{pallet::*, UseNominatorsAndValidatorsMap, UseValidatorsMap}; + +pub(crate) const STAKING_ID: LockIdentifier = *b"staking "; +pub(crate) const LOG_TARGET: &str = "runtime::staking-async"; + +// syntactic sugar for logging. +#[macro_export] +macro_rules! log { + ($level:tt, $patter:expr $(, $values:expr)* $(,)?) => { + log::$level!( + target: crate::LOG_TARGET, + concat!("[{:?}] 💸 ", $patter), >::block_number() $(, $values)* + ) + }; +} + +/// Alias for a bounded set of exposures behind a validator, parameterized by this pallet's +/// election provider. 
+pub type BoundedExposuresOf = BoundedVec< + ( + ::AccountId, + Exposure<::AccountId, BalanceOf>, + ), + MaxWinnersPerPageOf<::ElectionProvider>, +>; + +/// Alias for the maximum number of winners (aka. active validators), as defined in by this pallet's +/// config. +pub type MaxWinnersOf = ::MaxValidatorSet; + +/// Alias for the maximum number of winners per page, as expected by the election provider. +pub type MaxWinnersPerPageOf

=

::MaxWinnersPerPage; + +/// Maximum number of nominations per nominator. +pub type MaxNominationsOf = + <::NominationsQuota as NominationsQuota>>::MaxNominations; + +/// Counter for the number of "reward" points earned by a given validator. +pub type RewardPoint = u32; + +/// The balance type of this pallet. +pub type BalanceOf = ::CurrencyBalance; + +type PositiveImbalanceOf = Debt<::AccountId, ::Currency>; +pub type NegativeImbalanceOf = + Credit<::AccountId, ::Currency>; + +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; + +/// Information regarding the active era (era in used in session). +#[derive(Encode, Decode, RuntimeDebug, TypeInfo, MaxEncodedLen, PartialEq, Eq, Clone)] +pub struct ActiveEraInfo { + /// Index of era. + pub index: EraIndex, + /// Moment of start expressed as millisecond from `$UNIX_EPOCH`. + /// + /// Start can be none if start hasn't been set for the era yet, + /// Start is set on the first on_finalize of the era to guarantee usage of `Time`. + pub start: Option, +} + +/// Reward points of an era. Used to split era total payout between validators. +/// +/// This points will be used to reward validators and their respective nominators. +#[derive(PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] +pub struct EraRewardPoints { + /// Total number of points. Equals the sum of reward points for each validator. + pub total: RewardPoint, + /// The reward points earned by a given validator. + pub individual: BTreeMap, +} + +impl Default for EraRewardPoints { + fn default() -> Self { + EraRewardPoints { total: Default::default(), individual: BTreeMap::new() } + } +} + +/// A destination account for payment. +#[derive( + PartialEq, + Eq, + Copy, + Clone, + Encode, + Decode, + DecodeWithMemTracking, + RuntimeDebug, + TypeInfo, + MaxEncodedLen, +)] +pub enum RewardDestination { + /// Pay into the stash account, increasing the amount at stake accordingly. + Staked, + /// Pay into the stash account, not increasing the amount at stake. 
+ Stash, + #[deprecated( + note = "`Controller` will be removed after January 2024. Use `Account(controller)` instead." + )] + Controller, + /// Pay into a specified account. + Account(AccountId), + /// Receive no reward. + None, +} + +/// Preference of what happens regarding validation. +#[derive( + PartialEq, + Eq, + Clone, + Encode, + Decode, + DecodeWithMemTracking, + RuntimeDebug, + TypeInfo, + Default, + MaxEncodedLen, +)] +pub struct ValidatorPrefs { + /// Reward that validator takes up-front; only the rest is split between themselves and + /// nominators. + #[codec(compact)] + pub commission: Perbill, + /// Whether or not this validator is accepting more nominations. If `true`, then no nominator + /// who is not already nominating this validator may nominate them. By default, validators + /// are accepting nominations. + pub blocked: bool, +} + +/// Status of a paged snapshot progress. +#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, TypeInfo, MaxEncodedLen, Default)] +pub enum SnapshotStatus { + /// Paged snapshot is in progress, the `AccountId` was the last staker iterated in the list. + Ongoing(AccountId), + /// All the stakers in the system have been consumed since the snapshot started. + Consumed, + /// Waiting for a new snapshot to be requested. + #[default] + Waiting, +} + +/// A record of the nominations made by a specific account. +#[derive( + PartialEqNoBound, EqNoBound, Clone, Encode, Decode, RuntimeDebugNoBound, TypeInfo, MaxEncodedLen, +)] +#[codec(mel_bound())] +#[scale_info(skip_type_params(T))] +pub struct Nominations { + /// The targets of nomination. + pub targets: BoundedVec>, + /// The era the nominations were submitted. + /// + /// Except for initial nominations which are considered submitted at era 0. + pub submitted_in: EraIndex, + /// Whether the nominations have been suppressed. This can happen due to slashing of the + /// validators, or other events that might invalidate the nomination. 
+ /// + /// NOTE: this for future proofing and is thus far not used. + pub suppressed: bool, +} + +/// Facade struct to encapsulate `PagedExposureMetadata` and a single page of `ExposurePage`. +/// +/// This is useful where we need to take into account the validator's own stake and total exposure +/// in consideration, in addition to the individual nominators backing them. +#[derive(Encode, Decode, RuntimeDebug, TypeInfo, PartialEq, Eq)] +pub struct PagedExposure { + exposure_metadata: PagedExposureMetadata, + exposure_page: ExposurePage, +} + +impl + PagedExposure +{ + /// Create a new instance of `PagedExposure` from legacy clipped exposures. + pub fn from_clipped(exposure: Exposure) -> Self { + Self { + exposure_metadata: PagedExposureMetadata { + total: exposure.total, + own: exposure.own, + nominator_count: exposure.others.len() as u32, + page_count: 1, + }, + exposure_page: ExposurePage { page_total: exposure.total, others: exposure.others }, + } + } + + /// Returns total exposure of this validator across pages + pub fn total(&self) -> Balance { + self.exposure_metadata.total + } + + /// Returns total exposure of this validator for the current page + pub fn page_total(&self) -> Balance { + self.exposure_page.page_total + self.exposure_metadata.own + } + + /// Returns validator's own stake that is exposed + pub fn own(&self) -> Balance { + self.exposure_metadata.own + } + + /// Returns the portions of nominators stashes that are exposed in this page. + pub fn others(&self) -> &Vec> { + &self.exposure_page.others + } +} + +/// A pending slash record. The value of the slash has been computed but not applied yet, +/// rather deferred for several eras. +#[derive(Encode, Decode, RuntimeDebugNoBound, TypeInfo, MaxEncodedLen, PartialEqNoBound)] +#[scale_info(skip_type_params(T))] +pub struct UnappliedSlash { + /// The stash ID of the offending validator. + validator: T::AccountId, + /// The validator's own slash. 
+ own: BalanceOf, + /// All other slashed stakers and amounts. + others: WeakBoundedVec<(T::AccountId, BalanceOf), T::MaxExposurePageSize>, + /// Reporters of the offence; bounty payout recipients. + reporter: Option, + /// The amount of payout. + payout: BalanceOf, +} + +/// Something that defines the maximum number of nominations per nominator based on a curve. +/// +/// The method `curve` implements the nomination quota curve and should not be used directly. +/// However, `get_quota` returns the bounded maximum number of nominations based on `fn curve` and +/// the nominator's balance. +pub trait NominationsQuota { + /// Strict maximum number of nominations that caps the nominations curve. This value can be + /// used as the upper bound of the number of votes per nominator. + type MaxNominations: Get; + + /// Returns the voter's nomination quota within reasonable bounds [`min`, `max`], where `min` + /// is 1 and `max` is `Self::MaxNominations`. + fn get_quota(balance: Balance) -> u32 { + Self::curve(balance).clamp(1, Self::MaxNominations::get()) + } + + /// Returns the voter's nomination quota based on its balance and a curve. + fn curve(balance: Balance) -> u32; +} + +/// A nomination quota that allows up to MAX nominations for all validators. +pub struct FixedNominationsQuota; +impl NominationsQuota for FixedNominationsQuota { + type MaxNominations = ConstU32; + + fn curve(_: Balance) -> u32 { + MAX + } +} + +/// Handler for determining how much of a balance should be paid out on the current era. +pub trait EraPayout { + /// Determine the payout for this era. + /// + /// Returns the amount to be paid to stakers in this era, as well as whatever else should be + /// paid out ("the rest"). 
+ fn era_payout( + total_staked: Balance, + total_issuance: Balance, + era_duration_millis: u64, + ) -> (Balance, Balance); +} + +impl EraPayout for () { + fn era_payout( + _total_staked: Balance, + _total_issuance: Balance, + _era_duration_millis: u64, + ) -> (Balance, Balance) { + (Default::default(), Default::default()) + } +} + +/// Mode of era-forcing. +#[derive( + Copy, + Clone, + PartialEq, + Eq, + Encode, + Decode, + DecodeWithMemTracking, + RuntimeDebug, + TypeInfo, + MaxEncodedLen, + serde::Serialize, + serde::Deserialize, +)] +pub enum Forcing { + /// Not forcing anything - just let whatever happen. + NotForcing, + /// Force a new era, then reset to `NotForcing` as soon as it is done. + /// Note that this will force to trigger an election until a new era is triggered, if the + /// election failed, the next session end will trigger a new election again, until success. + ForceNew, + /// Avoid a new era indefinitely. + ForceNone, + /// Force a new era at the end of all sessions indefinitely. + ForceAlways, +} + +impl Default for Forcing { + fn default() -> Self { + Forcing::NotForcing + } +} + +/// A utility struct that provides a way to check if a given account is a staker. +/// +/// This struct implements the `Contains` trait, allowing it to determine whether +/// a particular account is currently staking by checking if the account exists in +/// the staking ledger. +/// +/// Intended to be used in [`crate::Config::Filter`]. +pub struct AllStakers(core::marker::PhantomData); + +impl Contains for AllStakers { + /// Checks if the given account ID corresponds to a staker. + /// + /// # Returns + /// - `true` if the account has an entry in the staking ledger (indicating it is staking). + /// - `false` otherwise. 
+ fn contains(account: &T::AccountId) -> bool { + Ledger::::contains_key(account) + } +} diff --git a/substrate/frame/staking-async/src/mock.rs b/substrate/frame/staking-async/src/mock.rs new file mode 100644 index 0000000000000..ed479123f93e4 --- /dev/null +++ b/substrate/frame/staking-async/src/mock.rs @@ -0,0 +1,969 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
Test utilities + +use crate::{ + self as pallet_staking_async, + session_rotation::{Eras, Rotator}, + *, +}; + +use frame_election_provider_support::{ + bounds::{ElectionBounds, ElectionBoundsBuilder}, + onchain, BoundedSupports, BoundedSupportsOf, ElectionProvider, PageIndex, SequentialPhragmen, + Support, VoteWeight, +}; +use frame_support::{ + assert_ok, derive_impl, ord_parameter_types, parameter_types, + traits::{EitherOfDiverse, Get, Imbalance, OnUnbalanced}, + weights::constants::RocksDbWeight, +}; +use frame_system::{pallet_prelude::BlockNumberFor, EnsureRoot, EnsureSignedBy}; +use pallet_staking_async_rc_client as rc_client; +use sp_core::ConstBool; +use sp_io; +use sp_npos_elections::BalancingConfig; +use sp_runtime::{traits::Zero, BuildStorage}; +use sp_staking::{ + currency_to_vote::SaturatingCurrencyToVote, OnStakingUpdate, SessionIndex, StakingAccount, +}; + +pub(crate) const INIT_TIMESTAMP: u64 = 30_000; +pub(crate) const BLOCK_TIME: u64 = 1000; + +frame_support::construct_runtime!( + pub enum Test { + System: frame_system, + Balances: pallet_balances, + Staking: pallet_staking_async, + VoterBagsList: pallet_bags_list::, + } +); + +pub(crate) type T = Test; +pub(crate) type Runtime = Test; +pub(crate) type AccountId = ::AccountId; +pub(crate) type BlockNumber = BlockNumberFor; +pub(crate) type Balance = ::Balance; + +parameter_types! 
{ + pub static ExistentialDeposit: Balance = 1; + pub static SlashDeferDuration: EraIndex = 0; + pub static MaxControllersInDeprecationBatch: u32 = 5900; + pub static BondingDuration: EraIndex = 3; + pub static HistoryDepth: u32 = 80; + pub static MaxExposurePageSize: u32 = 64; + pub static MaxUnlockingChunks: u32 = 32; + pub static RewardOnUnbalanceWasCalled: bool = false; + pub static MaxValidatorSet: u32 = 100; + pub static ElectionsBounds: ElectionBounds = ElectionBoundsBuilder::default().build(); + pub static AbsoluteMaxNominations: u32 = 16; + pub static PlanningEraOffset: u32 = 1; + // Session configs + pub static SessionsPerEra: SessionIndex = 3; + pub static Period: BlockNumber = 5; + pub static Offset: BlockNumber = 0; +} + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] +impl frame_system::Config for Test { + type DbWeight = RocksDbWeight; + type Block = frame_system::mocking::MockBlock; + type AccountData = pallet_balances::AccountData; +} +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] +impl pallet_balances::Config for Test { + type MaxLocks = frame_support::traits::ConstU32<1024>; + type Balance = u128; + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; +} + +parameter_types! { + pub static RewardRemainderUnbalanced: u128 = 0; +} +pub struct RewardRemainderMock; +impl OnUnbalanced> for RewardRemainderMock { + fn on_nonzero_unbalanced(amount: NegativeImbalanceOf) { + RewardRemainderUnbalanced::mutate(|v| { + *v += amount.peek(); + }); + drop(amount); + } +} + +pub(crate) const THRESHOLDS: [sp_npos_elections::VoteWeight; 9] = + [10, 20, 30, 40, 50, 60, 1_000, 2_000, 10_000]; + +parameter_types! 
{ + pub static BagThresholds: &'static [sp_npos_elections::VoteWeight] = &THRESHOLDS; +} + +pub type VoterBagsListInstance = pallet_bags_list::Instance1; +impl pallet_bags_list::Config for Test { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = (); + // Staking is the source of truth for voter bags list, since they are not kept up to date. + type ScoreProvider = Staking; + type BagThresholds = BagThresholds; + type Score = VoteWeight; +} + +// multi-page types and controller. +parameter_types! { + pub static Pages: PageIndex = 1; + pub static MaxBackersPerWinner: u32 = 256; + pub static MaxWinnersPerPage: u32 = MaxValidatorSet::get(); + pub static StartReceived: bool = false; +} + +pub type InnerElection = onchain::OnChainExecution; +pub struct Balancing; +impl Get> for Balancing { + fn get() -> Option { + Some(BalancingConfig { iterations: 5, tolerance: 0 }) + } +} + +pub struct OnChainSeqPhragmen; +impl onchain::Config for OnChainSeqPhragmen { + type System = Test; + type Solver = SequentialPhragmen; + type DataProvider = Staking; + type WeightInfo = (); + type Bounds = ElectionsBounds; + type Sort = ConstBool; + type MaxBackersPerWinner = MaxBackersPerWinner; + type MaxWinnersPerPage = MaxWinnersPerPage; +} + +pub struct TestElectionProvider; +impl ElectionProvider for TestElectionProvider { + type AccountId = AccountId; + type BlockNumber = BlockNumber; + type MaxWinnersPerPage = MaxWinnersPerPage; + type MaxBackersPerWinner = MaxBackersPerWinner; + type Pages = Pages; + type DataProvider = Staking; + type Error = onchain::Error; + + fn elect(page: PageIndex) -> Result, Self::Error> { + if page == 0 { + StartReceived::set(false); + } + InnerElection::elect(page) + } + fn start() -> Result<(), Self::Error> { + StartReceived::set(true); + Ok(()) + } + fn duration() -> Self::BlockNumber { + InnerElection::duration() + } + fn status() -> Result { + if StartReceived::get() { + Ok(true) + } else { + Err(()) + } + } +} +pub struct MockReward {} +impl 
OnUnbalanced> for MockReward { + fn on_unbalanced(_: PositiveImbalanceOf) { + RewardOnUnbalanceWasCalled::set(true); + } +} + +parameter_types! { + pub static LedgerSlashPerEra: + (BalanceOf, BTreeMap>) = + (Zero::zero(), BTreeMap::new()); + pub static SlashObserver: BTreeMap> = BTreeMap::new(); + pub static RestrictedAccounts: Vec = Vec::new(); +} + +pub struct EventListenerMock; +impl OnStakingUpdate for EventListenerMock { + fn on_slash( + pool_account: &AccountId, + slashed_bonded: Balance, + slashed_chunks: &BTreeMap, + total_slashed: Balance, + ) { + LedgerSlashPerEra::set((slashed_bonded, slashed_chunks.clone())); + SlashObserver::mutate(|map| { + map.insert(*pool_account, map.get(pool_account).unwrap_or(&0) + total_slashed) + }); + } +} + +pub struct MockedRestrictList; +impl Contains for MockedRestrictList { + fn contains(who: &AccountId) -> bool { + RestrictedAccounts::get().contains(who) + } +} + +/// A representation of the session pallet that lives on the relay chain. +pub mod session_mock { + use super::*; + use pallet_staking_async_rc_client::ValidatorSetReport; + + pub struct Session; + + impl Session { + pub fn queued_validators() -> Option> { + Queued::get() + } + + pub fn validators() -> Vec { + Active::get() + } + + pub fn current_index() -> SessionIndex { + CurrentIndex::get() + } + + pub fn roll_until(block: BlockNumber) { + while System::block_number() < block { + Self::roll_next(); + } + } + + pub fn roll_next() { + let now = System::block_number(); + Timestamp::mutate(|ts| *ts += BLOCK_TIME); + System::run_to_block::(now + 1); + Self::maybe_rotate_session_now(); + } + + pub fn roll_to_next_session() { + let current = Self::current_index(); + while Self::current_index() != (current + 1) { + Self::roll_next(); + } + } + + pub fn roll_until_session(session: SessionIndex) { + while Self::current_index() != session { + Self::roll_next(); + } + } + + pub fn roll_until_active_era(era: EraIndex) { + while active_era() != era { + Self::roll_next(); 
+ } + } + + fn maybe_rotate_session_now() { + let now = System::block_number(); + let period = Period::get(); + if now % period == 0 { + Self::advance_session(); + } + } + + fn advance_session() { + let ending = Self::current_index(); + if let Some((q, id)) = Queued::get().zip(QueuedId::get()) { + // we have something queued + if QueuedBufferSessions::get() == 0 { + // buffer time has passed + Active::set(q); + Rotator::::end_session(ending, Some((Timestamp::get(), id))); + Queued::reset(); + QueuedId::reset(); + } else { + QueuedBufferSessions::mutate(|s| *s -= 1); + Rotator::::end_session(ending, None); + } + } else { + // just end the session. + Rotator::::end_session(ending, None); + } + CurrentIndex::set(ending + 1); + } + } + + parameter_types! { + pub static ReceivedValidatorSets + : BTreeMap> + = BTreeMap::new(); + pub static Queued: Option> = None; + pub static QueuedId: Option = None; + pub static QueuedBufferSessions: BlockNumber = 1; + pub static Active: Vec = Vec::new(); + pub static CurrentIndex: u32 = 0; + pub static Timestamp: u64 = INIT_TIMESTAMP; + } + + impl ReceivedValidatorSets { + pub fn get_last() -> ValidatorSetReport { + let mut data = Self::get(); + data.pop_last().unwrap().1 + } + } + + impl pallet_staking_async_rc_client::RcClientInterface for Session { + type AccountId = AccountId; + + fn validator_set( + new_validator_set: Vec, + id: u32, + prune_up_to: Option, + ) { + log::debug!(target: "runtime::session_mock", "Received validator set: {:?}", new_validator_set); + let now = System::block_number(); + // store the report for further inspection. + ReceivedValidatorSets::mutate(|reports| { + reports.insert( + now, + ValidatorSetReport { + id, + new_validator_set: new_validator_set.clone(), + prune_up_to, + leftover: false, + }, + ); + }); + + // queue the validator set. + Queued::set(Some(new_validator_set)); + QueuedId::set(Some(id)); + QueuedBufferSessions::set(1); + } + } +} + +pub use session_mock::Session; + +ord_parameter_types! 
{ + pub const One: u64 = 1; +} + +parameter_types! { + pub static RemainderRatio: Perbill = Perbill::from_percent(50); +} +pub struct OneTokenPerMillisecond; +impl EraPayout for OneTokenPerMillisecond { + fn era_payout( + _total_staked: Balance, + _total_issuance: Balance, + era_duration_millis: u64, + ) -> (Balance, Balance) { + let total = era_duration_millis as Balance; + let remainder = RemainderRatio::get() * total; + let stakers = total - remainder; + (stakers, remainder) + } +} + +impl crate::pallet::pallet::Config for Test { + type RuntimeHoldReason = RuntimeHoldReason; + type OldCurrency = Balances; + type Currency = Balances; + type RewardRemainder = RewardRemainderMock; + type Reward = MockReward; + type SessionsPerEra = SessionsPerEra; + type SlashDeferDuration = SlashDeferDuration; + type AdminOrigin = EitherOfDiverse, EnsureSignedBy>; + type EraPayout = OneTokenPerMillisecond; + type MaxExposurePageSize = MaxExposurePageSize; + type MaxValidatorSet = MaxValidatorSet; + type ElectionProvider = TestElectionProvider; + type VoterList = VoterBagsList; + type TargetList = UseValidatorsMap; + type NominationsQuota = WeightedNominationsQuota<16>; + type MaxUnlockingChunks = MaxUnlockingChunks; + type HistoryDepth = HistoryDepth; + type BondingDuration = BondingDuration; + type MaxControllersInDeprecationBatch = MaxControllersInDeprecationBatch; + type EventListeners = EventListenerMock; + type MaxInvulnerables = ConstU32<20>; + type MaxDisabledValidators = ConstU32<100>; + type PlanningEraOffset = PlanningEraOffset; + type Filter = MockedRestrictList; + type RcClientInterface = session_mock::Session; + type CurrencyBalance = Balance; + type CurrencyToVote = SaturatingCurrencyToVote; + type Slash = (); + type WeightInfo = (); +} + +pub struct WeightedNominationsQuota; +impl NominationsQuota for WeightedNominationsQuota +where + u128: From, +{ + type MaxNominations = AbsoluteMaxNominations; + + fn curve(balance: Balance) -> u32 { + match balance.into() { + // 
random curve for testing. + 0..=110 => MAX, + 111 => 0, + 222 => 2, + 333 => MAX + 10, + _ => MAX, + } + } +} + +parameter_types! { + // if true, skips the try-state for the test running. + pub static SkipTryStateCheck: bool = false; +} + +pub struct ExtBuilder { + nominate: bool, + validator_count: u32, + invulnerables: BoundedVec::MaxInvulnerables>, + has_stakers: bool, + pub min_nominator_bond: Balance, + min_validator_bond: Balance, + balance_factor: Balance, + status: BTreeMap>, + stakes: BTreeMap, + stakers: Vec<(AccountId, Balance, StakerStatus)>, + flush_events: bool, +} + +impl Default for ExtBuilder { + fn default() -> Self { + Self { + nominate: true, + validator_count: 2, + balance_factor: 1, + invulnerables: BoundedVec::new(), + has_stakers: true, + min_nominator_bond: ExistentialDeposit::get(), + min_validator_bond: ExistentialDeposit::get(), + status: Default::default(), + stakes: Default::default(), + stakers: Default::default(), + flush_events: true, + } + } +} + +#[allow(unused)] +impl ExtBuilder { + pub(crate) fn existential_deposit(self, existential_deposit: Balance) -> Self { + EXISTENTIAL_DEPOSIT.with(|v| *v.borrow_mut() = existential_deposit); + self + } + pub(crate) fn max_unlock_chunks(self, max: u32) -> Self { + MaxUnlockingChunks::set(max); + self + } + pub(crate) fn bonding_duration(self, bonding_duration: EraIndex) -> Self { + BondingDuration::set(bonding_duration); + self + } + pub(crate) fn planning_era_offset(self, offset: SessionIndex) -> Self { + PlanningEraOffset::set(offset); + self + } + pub(crate) fn nominate(mut self, nominate: bool) -> Self { + self.nominate = nominate; + self + } + pub(crate) fn no_flush_events(mut self) -> Self { + self.flush_events = false; + self + } + pub(crate) fn validator_count(mut self, count: u32) -> Self { + self.validator_count = count; + self + } + pub(crate) fn slash_defer_duration(self, eras: EraIndex) -> Self { + SlashDeferDuration::set(eras); + self + } + pub(crate) fn invulnerables(mut self, 
invulnerables: Vec) -> Self { + self.invulnerables = BoundedVec::try_from(invulnerables) + .expect("Too many invulnerable validators: upper limit is MaxInvulnerables"); + self + } + pub(crate) fn session_per_era(self, length: SessionIndex) -> Self { + SessionsPerEra::set(length); + self + } + pub(crate) fn period(self, length: BlockNumber) -> Self { + Period::set(length); + self + } + pub(crate) fn has_stakers(mut self, has: bool) -> Self { + self.has_stakers = has; + self + } + pub(crate) fn offset(self, offset: BlockNumber) -> Self { + OFFSET.with(|v| *v.borrow_mut() = offset); + self + } + pub(crate) fn min_nominator_bond(mut self, amount: Balance) -> Self { + self.min_nominator_bond = amount; + self + } + pub(crate) fn min_validator_bond(mut self, amount: Balance) -> Self { + self.min_validator_bond = amount; + self + } + pub(crate) fn set_status(mut self, who: AccountId, status: StakerStatus) -> Self { + self.status.insert(who, status); + self + } + pub(crate) fn set_stake(mut self, who: AccountId, stake: Balance) -> Self { + self.stakes.insert(who, stake); + self + } + pub(crate) fn add_staker( + mut self, + stash: AccountId, + stake: Balance, + status: StakerStatus, + ) -> Self { + self.stakers.push((stash, stake, status)); + self + } + pub(crate) fn exposures_page_size(self, max: u32) -> Self { + MaxExposurePageSize::set(max); + self + } + pub(crate) fn balance_factor(mut self, factor: Balance) -> Self { + self.balance_factor = factor; + self + } + pub(crate) fn multi_page_election_provider(self, pages: PageIndex) -> Self { + Pages::set(pages); + self + } + pub(crate) fn election_bounds(self, voter_count: u32, target_count: u32) -> Self { + let bounds = ElectionBoundsBuilder::default() + .voters_count(voter_count.into()) + .targets_count(target_count.into()) + .build(); + ElectionsBounds::set(bounds); + self + } + pub(crate) fn max_winners_per_page(self, max: u32) -> Self { + MaxWinnersPerPage::set(max); + self + } + pub(crate) fn try_state(self, enable: 
bool) -> Self { + SkipTryStateCheck::set(!enable); + self + } + fn build(self) -> sp_io::TestExternalities { + sp_tracing::try_init_simple(); + let mut storage = frame_system::GenesisConfig::::default().build_storage().unwrap(); + let ed = ExistentialDeposit::get(); + + let mut maybe_stakers = vec![]; + if self.has_stakers { + maybe_stakers = vec![ + // (stash, stake, status) + // these two will be elected in the default test where we elect 2. + (11, self.balance_factor * 1000, StakerStatus::::Validator), + (21, self.balance_factor * 1000, StakerStatus::::Validator), + // a loser validator + (31, self.balance_factor * 500, StakerStatus::::Validator), + // idle stakers + (41, self.balance_factor * 4000, StakerStatus::::Idle), + (51, self.balance_factor * 5000, StakerStatus::::Idle), + ]; // optionally add a nominator + if self.nominate { + maybe_stakers.push(( + 101, + self.balance_factor * 500, + StakerStatus::::Nominator(vec![11, 21]), + )) + } + // replace any of the status if needed. + self.status.into_iter().for_each(|(stash, status)| { + let (_, _, ref mut prev_status) = maybe_stakers + .iter_mut() + .find(|s| s.0 == stash) + .expect("set_status staker should exist; qed"); + *prev_status = status; + }); + // replaced any of the stakes if needed. + self.stakes.into_iter().for_each(|(stash, stake)| { + let (_, ref mut prev_stake, _) = maybe_stakers + .iter_mut() + .find(|s| s.0 == stash) + .expect("set_stake staker should exits; qed."); + *prev_stake = stake; + }); + // extend stakers if needed. + maybe_stakers.extend(self.stakers) + } + + let aux_balances = vec![ + // aux accounts + (1, ed + 10 * self.balance_factor), + (2, ed + 20 * self.balance_factor), + (3, ed + 300 * self.balance_factor), + (4, ed + 400 * self.balance_factor), + // This allows us to have a total_payout different from 0. + (999, 1_000_000_000_000), + ]; + // given each stakers their stake + ed as balance. 
+ let stakers_balances = + maybe_stakers.clone().into_iter().map(|(who, stake, _)| (who, stake + ed)); + let balances = aux_balances.into_iter().chain(stakers_balances).collect::>(); + + let _ = pallet_balances::GenesisConfig:: { balances, ..Default::default() } + .assimilate_storage(&mut storage); + + let _ = pallet_staking_async::GenesisConfig:: { + stakers: maybe_stakers, + validator_count: self.validator_count, + invulnerables: self.invulnerables, + active_era: (0, 0, INIT_TIMESTAMP), + slash_reward_fraction: Perbill::from_percent(10), + min_nominator_bond: self.min_nominator_bond, + min_validator_bond: self.min_validator_bond, + ..Default::default() + } + .assimilate_storage(&mut storage); + + let mut ext = sp_io::TestExternalities::from(storage); + + ext.execute_with(|| { + session_mock::Session::roll_until_active_era(1); + RewardRemainderUnbalanced::set(0); + if self.flush_events { + let _ = staking_events_since_last_call(); + } + }); + + ext + } + pub fn build_and_execute(self, test: impl FnOnce() -> ()) { + sp_tracing::try_init_simple(); + let mut ext = self.build(); + ext.execute_with(test); + ext.execute_with(|| { + if !SkipTryStateCheck::get() { + Staking::do_try_state(System::block_number()).unwrap(); + } + }); + } +} + +pub(crate) fn active_era() -> EraIndex { + pallet_staking_async::ActiveEra::::get().unwrap().index +} + +pub(crate) fn current_era() -> EraIndex { + pallet_staking_async::CurrentEra::::get().unwrap() +} + +pub(crate) fn bond(who: AccountId, val: Balance) { + let _ = asset::set_stakeable_balance::(&who, val); + assert_ok!(Staking::bond(RuntimeOrigin::signed(who), val, RewardDestination::Stash)); +} + +pub(crate) fn bond_validator(who: AccountId, val: Balance) { + bond(who, val); + assert_ok!(Staking::validate(RuntimeOrigin::signed(who), ValidatorPrefs::default())); +} + +pub(crate) fn bond_nominator(who: AccountId, val: Balance, target: Vec) { + bond(who, val); + assert_ok!(Staking::nominate(RuntimeOrigin::signed(who), target)); +} + 
+pub(crate) fn bond_virtual_nominator( + who: AccountId, + payee: AccountId, + val: Balance, + target: Vec, +) { + // Bond who virtually. + assert_ok!(::virtual_bond(&who, val, &payee)); + assert_ok!(Staking::nominate(RuntimeOrigin::signed(who), target)); +} + +pub(crate) fn validator_payout_for(duration: u64) -> Balance { + let (payout, _rest) = ::EraPayout::era_payout( + pallet_staking_async::ErasTotalStake::::get(active_era()), + pallet_balances::TotalIssuance::::get(), + duration, + ); + assert!(payout > 0); + payout +} + +pub(crate) fn total_payout_for(duration: u64) -> Balance { + let (payout, rest) = ::EraPayout::era_payout( + pallet_staking_async::ErasTotalStake::::get(active_era()), + pallet_balances::TotalIssuance::::get(), + duration, + ); + payout + rest +} + +/// Time it takes to finish a session. +/// +/// Note, if you see `time_per_session() - BLOCK_TIME`, it is fine. This is because we set the +/// timestamp after on_initialize, so the timestamp is always one block old. +pub(crate) fn time_per_session() -> u64 { + Period::get() * BLOCK_TIME +} + +/// Time it takes to finish an era. 
+pub(crate) fn time_per_era() -> u64 { + time_per_session() * SessionsPerEra::get() as u64 +} + +pub(crate) fn reward_all_elected() { + let rewards = session_mock::Session::validators().into_iter().map(|v| (v, 1)); + >::reward_by_ids(rewards) +} + +pub(crate) fn era_exposures(era: u32) -> Vec<(AccountId, Exposure)> { + ErasStakersOverview::::iter_prefix(era) + .map(|(v, _overview)| (v, Staking::eras_stakers(era, &v))) + .collect::>() +} + +pub(crate) fn session_validators() -> Vec { + Session::validators() +} + +/// Add a slash for who +pub(crate) fn add_slash(who: AccountId) { + let _ = ::on_new_offences( + session_mock::Session::current_index(), + vec![rc_client::Offence { + offender: who, + reporters: vec![], + slash_fraction: Perbill::from_percent(10), + }], + ); +} + +pub(crate) fn add_slash_in_era(who: AccountId, era: EraIndex) { + let _ = ::on_new_offences( + ErasStartSessionIndex::::get(era).unwrap(), + vec![rc_client::Offence { + offender: who, + reporters: vec![], + slash_fraction: Perbill::from_percent(10), + }], + ); +} + +pub(crate) fn add_slash_in_era_with_value(who: AccountId, era: EraIndex, p: Perbill) { + let _ = ::on_new_offences( + ErasStartSessionIndex::::get(era).unwrap(), + vec![rc_client::Offence { offender: who, reporters: vec![], slash_fraction: p }], + ); +} + +pub(crate) fn add_slash_with_percent(who: AccountId, percent: u32) { + let _ = ::on_new_offences( + session_mock::Session::current_index(), + vec![rc_client::Offence { + offender: who, + reporters: vec![], + slash_fraction: Perbill::from_percent(percent), + }], + ); +} + +/// Make all validator and nominator request their payment +pub(crate) fn make_all_reward_payment(era: EraIndex) { + let validators_with_reward = ErasRewardPoints::::get(era) + .individual + .keys() + .cloned() + .collect::>(); + + // reward validators + for validator_controller in validators_with_reward.iter().filter_map(Staking::bonded) { + let ledger = >::get(&validator_controller).unwrap(); + for page in 
0..Eras::::exposure_page_count(era, &ledger.stash) { + assert_ok!(Staking::payout_stakers_by_page( + RuntimeOrigin::signed(1337), + ledger.stash, + era, + page + )); + } + } +} + +pub(crate) fn bond_controller_stash(controller: AccountId, stash: AccountId) -> Result<(), String> { + >::get(&stash).map_or(Ok(()), |_| Err("stash already bonded"))?; + >::get(&controller).map_or(Ok(()), |_| Err("controller already bonded"))?; + + >::insert(stash, controller); + >::insert(controller, StakingLedger::::default_from(stash)); + >::insert(stash, RewardDestination::Staked); + + Ok(()) +} + +// simulates `set_controller` without corrupted ledger checks for testing purposes. +pub(crate) fn set_controller_no_checks(stash: &AccountId) { + let controller = Bonded::::get(stash).expect("testing stash should be bonded"); + let ledger = Ledger::::get(&controller).expect("testing ledger should exist"); + + Ledger::::remove(&controller); + Ledger::::insert(stash, ledger); + Bonded::::insert(stash, stash); +} + +// simulates `bond_extra` without corrupted ledger checks for testing purposes. 
+pub(crate) fn bond_extra_no_checks(stash: &AccountId, amount: Balance) { + let controller = Bonded::::get(stash).expect("bond must exist to bond_extra"); + let mut ledger = Ledger::::get(&controller).expect("ledger must exist to bond_extra"); + + let new_total = ledger.total + amount; + let _ = asset::update_stake::(stash, new_total); + ledger.total = new_total; + ledger.active = new_total; + Ledger::::insert(controller, ledger); +} + +pub(crate) fn setup_double_bonded_ledgers() { + let init_ledgers = Ledger::::iter().count(); + + let _ = asset::set_stakeable_balance::(&333, 2000); + let _ = asset::set_stakeable_balance::(&444, 2000); + let _ = asset::set_stakeable_balance::(&555, 2000); + let _ = asset::set_stakeable_balance::(&777, 2000); + + assert_ok!(Staking::bond(RuntimeOrigin::signed(333), 10, RewardDestination::Staked)); + assert_ok!(Staking::bond(RuntimeOrigin::signed(444), 20, RewardDestination::Staked)); + assert_ok!(Staking::bond(RuntimeOrigin::signed(555), 20, RewardDestination::Staked)); + // not relevant to the test case, but ensures try-runtime checks pass. + [333, 444, 555] + .iter() + .for_each(|s| Payee::::insert(s, RewardDestination::Staked)); + + // we want to test the case where a controller can also be a stash of another ledger. + // for that, we change the controller/stash bonding so that: + // * 444 becomes controller of 333. + // * 555 becomes controller of 444. + // * 777 becomes controller of 555. + let ledger_333 = Ledger::::get(333).unwrap(); + let ledger_444 = Ledger::::get(444).unwrap(); + let ledger_555 = Ledger::::get(555).unwrap(); + + // 777 becomes controller of 555. + Bonded::::mutate(555, |controller| *controller = Some(777)); + Ledger::::insert(777, ledger_555); + + // 555 becomes controller of 444. + Bonded::::mutate(444, |controller| *controller = Some(555)); + Ledger::::insert(555, ledger_444); + + // 444 becomes controller of 333. 
+ Bonded::::mutate(333, |controller| *controller = Some(444)); + Ledger::::insert(444, ledger_333); + + // 333 is not controller anymore. + Ledger::::remove(333); + + // checks. now we have: + // * +3 ledgers + assert_eq!(Ledger::::iter().count(), 3 + init_ledgers); + + // * stash 333 has controller 444. + assert_eq!(Bonded::::get(333), Some(444)); + assert_eq!(StakingLedger::::paired_account(StakingAccount::Stash(333)), Some(444)); + assert_eq!(Ledger::::get(444).unwrap().stash, 333); + + // * stash 444 has controller 555. + assert_eq!(Bonded::::get(444), Some(555)); + assert_eq!(StakingLedger::::paired_account(StakingAccount::Stash(444)), Some(555)); + assert_eq!(Ledger::::get(555).unwrap().stash, 444); + + // * stash 555 has controller 777. + assert_eq!(Bonded::::get(555), Some(777)); + assert_eq!(StakingLedger::::paired_account(StakingAccount::Stash(555)), Some(777)); + assert_eq!(Ledger::::get(777).unwrap().stash, 555); +} + +#[macro_export] +macro_rules! assert_session_era { + ($session:expr, $era:expr) => { + assert_eq!( + session_mock::Session::current_index(), + $session, + "wrong session {} != {}", + session_mock::Session::current_index(), + $session, + ); + assert_eq!( + CurrentEra::::get().unwrap(), + $era, + "wrong current era {} != {}", + CurrentEra::::get().unwrap(), + $era, + ); + }; +} + +pub(crate) fn staking_events() -> Vec> { + System::events() + .into_iter() + .map(|r| r.event) + .filter_map(|e| if let RuntimeEvent::Staking(inner) = e { Some(inner) } else { None }) + .collect() +} + +parameter_types! 
{ + static StakingEventsIndex: usize = 0; +} + +pub(crate) fn staking_events_since_last_call() -> Vec> { + let all: Vec<_> = System::events() + .into_iter() + .filter_map(|r| if let RuntimeEvent::Staking(inner) = r.event { Some(inner) } else { None }) + .collect(); + let seen = StakingEventsIndex::get(); + StakingEventsIndex::set(all.len()); + all.into_iter().skip(seen).collect() +} + +pub(crate) fn to_bounded_supports( + supports: Vec<(AccountId, Support)>, +) -> BoundedSupports< + AccountId, + <::ElectionProvider as ElectionProvider>::MaxWinnersPerPage, + <::ElectionProvider as ElectionProvider>::MaxBackersPerWinner, +> { + supports.try_into().unwrap() +} + +pub(crate) fn restrict(who: &AccountId) { + if !RestrictedAccounts::get().contains(who) { + RestrictedAccounts::mutate(|l| l.push(*who)); + } +} + +pub(crate) fn remove_from_restrict_list(who: &AccountId) { + RestrictedAccounts::mutate(|l| l.retain(|x| x != who)); +} diff --git a/substrate/frame/staking-async/src/pallet/impls.rs b/substrate/frame/staking-async/src/pallet/impls.rs new file mode 100644 index 0000000000000..9b2d8daecfe66 --- /dev/null +++ b/substrate/frame/staking-async/src/pallet/impls.rs @@ -0,0 +1,1913 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! `pallet-staking-async`'s main `impl` blocks. 
+ +use crate::{ + asset, + election_size_tracker::StaticTracker, + log, + session_rotation::{self, Eras}, + slashing, + weights::WeightInfo, + BalanceOf, Exposure, Forcing, LedgerIntegrityState, MaxNominationsOf, Nominations, + NominationsQuota, PositiveImbalanceOf, RewardDestination, SnapshotStatus, StakingLedger, + ValidatorPrefs, STAKING_ID, +}; +use alloc::{boxed::Box, vec, vec::Vec}; +use frame_election_provider_support::{ + bounds::CountBound, data_provider, DataProviderBounds, ElectionDataProvider, ElectionProvider, + PageIndex, ScoreProvider, SortedListProvider, VoteWeight, VoterOf, +}; +use frame_support::{ + defensive, + dispatch::WithPostDispatchInfo, + pallet_prelude::*, + traits::{ + Defensive, DefensiveSaturating, Get, Imbalance, InspectLockableCurrency, LockableCurrency, + OnUnbalanced, + }, + weights::Weight, +}; +use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; +use pallet_staking_async_rc_client::{self as rc_client}; +use sp_runtime::{ + traits::{CheckedAdd, Saturating, StaticLookup, Zero}, + ArithmeticError, DispatchResult, Perbill, +}; +use sp_staking::{ + currency_to_vote::CurrencyToVote, + EraIndex, OnStakingUpdate, Page, SessionIndex, Stake, + StakingAccount::{self, Controller, Stash}, + StakingInterface, +}; + +use super::pallet::*; + +use crate::slashing::OffenceRecord; +#[cfg(feature = "try-runtime")] +use frame_support::ensure; +#[cfg(any(test, feature = "try-runtime"))] +use sp_runtime::TryRuntimeError; + +/// The maximum number of iterations that we do whilst iterating over `T::VoterList` in +/// `get_npos_voters`. +/// +/// In most cases, if we want n items, we iterate exactly n times. In rare cases, if a voter is +/// invalid (for any reason) the iteration continues. With this constant, we iterate at most 2 * n +/// times and then give up. +const NPOS_MAX_ITERATIONS_COEFFICIENT: u32 = 2; + +impl Pallet { + /// Fetches the ledger associated with a controller or stash account, if any. 
+ pub fn ledger(account: StakingAccount) -> Result, Error> { + StakingLedger::::get(account) + } + + pub fn payee(account: StakingAccount) -> Option> { + StakingLedger::::reward_destination(account) + } + + /// Fetches the controller bonded to a stash account, if any. + pub fn bonded(stash: &T::AccountId) -> Option { + StakingLedger::::paired_account(Stash(stash.clone())) + } + + /// Inspects and returns the corruption state of a ledger and direct bond, if any. + /// + /// Note: all operations in this method access directly the `Bonded` and `Ledger` storage maps + /// instead of using the [`StakingLedger`] API since the bond and/or ledger may be corrupted. + /// It is also meant to check state for direct bonds and may not work as expected for virtual + /// bonds. + pub(crate) fn inspect_bond_state( + stash: &T::AccountId, + ) -> Result> { + // look at any old unmigrated lock as well. + let hold_or_lock = asset::staked::(&stash) + .max(T::OldCurrency::balance_locked(STAKING_ID, &stash).into()); + + let controller = >::get(stash).ok_or_else(|| { + if hold_or_lock == Zero::zero() { + Error::::NotStash + } else { + Error::::BadState + } + })?; + + match Ledger::::get(controller) { + Some(ledger) => + if ledger.stash != *stash { + Ok(LedgerIntegrityState::Corrupted) + } else { + if hold_or_lock != ledger.total { + Ok(LedgerIntegrityState::LockCorrupted) + } else { + Ok(LedgerIntegrityState::Ok) + } + }, + None => Ok(LedgerIntegrityState::CorruptedKilled), + } + } + + /// The total balance that can be slashed from a stash account as of right now. + pub fn slashable_balance_of(stash: &T::AccountId) -> BalanceOf { + // Weight note: consider making the stake accessible through stash. + Self::ledger(Stash(stash.clone())).map(|l| l.active).unwrap_or_default() + } + + /// Internal impl of [`Self::slashable_balance_of`] that returns [`VoteWeight`]. 
+ pub fn slashable_balance_of_vote_weight( + stash: &T::AccountId, + issuance: BalanceOf, + ) -> VoteWeight { + T::CurrencyToVote::to_vote(Self::slashable_balance_of(stash), issuance) + } + + /// Returns a closure around `slashable_balance_of_vote_weight` that can be passed around. + /// + /// This prevents call sites from repeatedly requesting `total_issuance` from backend. But it is + /// important to be only used while the total issuance is not changing. + pub fn weight_of_fn() -> Box VoteWeight> { + // NOTE: changing this to unboxed `impl Fn(..)` return type and the pallet will still + // compile, while some types in mock fail to resolve. + let issuance = asset::total_issuance::(); + Box::new(move |who: &T::AccountId| -> VoteWeight { + Self::slashable_balance_of_vote_weight(who, issuance) + }) + } + + /// Same as `weight_of_fn`, but made for one time use. + pub fn weight_of(who: &T::AccountId) -> VoteWeight { + let issuance = asset::total_issuance::(); + Self::slashable_balance_of_vote_weight(who, issuance) + } + + pub(super) fn do_bond_extra(stash: &T::AccountId, additional: BalanceOf) -> DispatchResult { + let mut ledger = Self::ledger(StakingAccount::Stash(stash.clone()))?; + + // for virtual stakers, we don't need to check the balance. Since they are only accessed + // via low level apis, we can assume that the caller has done the due diligence. + let extra = if Self::is_virtual_staker(stash) { + additional + } else { + // additional amount or actual balance of stash whichever is lower. + additional.min(asset::free_to_stake::(stash)) + }; + + ledger.total = ledger.total.checked_add(&extra).ok_or(ArithmeticError::Overflow)?; + ledger.active = ledger.active.checked_add(&extra).ok_or(ArithmeticError::Overflow)?; + // last check: the new active amount of ledger must be more than ED. + ensure!(ledger.active >= asset::existential_deposit::(), Error::::InsufficientBond); + + // NOTE: ledger must be updated prior to calling `Self::weight_of`. 
+ ledger.update()?; + // update this staker in the sorted list, if they exist in it. + if T::VoterList::contains(stash) { + // This might fail if the voter list is locked. + let _ = T::VoterList::on_update(&stash, Self::weight_of(stash)); + } + + Self::deposit_event(Event::::Bonded { stash: stash.clone(), amount: extra }); + + Ok(()) + } + + pub(super) fn do_withdraw_unbonded( + controller: &T::AccountId, + num_slashing_spans: u32, + ) -> Result { + let mut ledger = Self::ledger(Controller(controller.clone()))?; + let (stash, old_total) = (ledger.stash.clone(), ledger.total); + if let Some(current_era) = CurrentEra::::get() { + ledger = ledger.consolidate_unlocked(current_era) + } + let new_total = ledger.total; + + let ed = asset::existential_deposit::(); + let used_weight = + if ledger.unlocking.is_empty() && (ledger.active < ed || ledger.active.is_zero()) { + // This account must have called `unbond()` with some value that caused the active + // portion to fall below existential deposit + will have no more unlocking chunks + // left. We can now safely remove all staking-related information. + Self::kill_stash(&ledger.stash, num_slashing_spans)?; + + T::WeightInfo::withdraw_unbonded_kill(num_slashing_spans) + } else { + // This was the consequence of a partial unbond. just update the ledger and move on. + ledger.update()?; + + // This is only an update, so we use less overall weight. + T::WeightInfo::withdraw_unbonded_update(num_slashing_spans) + }; + + // `old_total` should never be less than the new total because + // `consolidate_unlocked` strictly subtracts balance. + if new_total < old_total { + // Already checked that this won't overflow by entry condition. + let value = old_total.defensive_saturating_sub(new_total); + Self::deposit_event(Event::::Withdrawn { stash, amount: value }); + + // notify listeners. 
+ T::EventListeners::on_withdraw(controller, value); + } + + Ok(used_weight) + } + + pub(super) fn do_payout_stakers( + validator_stash: T::AccountId, + era: EraIndex, + ) -> DispatchResultWithPostInfo { + let page = Eras::::get_next_claimable_page(era, &validator_stash).ok_or_else(|| { + Error::::AlreadyClaimed.with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) + })?; + + Self::do_payout_stakers_by_page(validator_stash, era, page) + } + + pub(super) fn do_payout_stakers_by_page( + validator_stash: T::AccountId, + era: EraIndex, + page: Page, + ) -> DispatchResultWithPostInfo { + // Validate input data + let current_era = CurrentEra::::get().ok_or_else(|| { + Error::::InvalidEraToReward + .with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) + })?; + + let history_depth = T::HistoryDepth::get(); + + ensure!( + era <= current_era && era >= current_era.saturating_sub(history_depth), + Error::::InvalidEraToReward + .with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) + ); + + ensure!( + page < Eras::::exposure_page_count(era, &validator_stash), + Error::::InvalidPage.with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) + ); + + // Note: if era has no reward to be claimed, era may be future. 
+ let era_payout = Eras::::get_validators_reward(era).ok_or_else(|| { + Error::::InvalidEraToReward + .with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) + })?; + + let account = StakingAccount::Stash(validator_stash.clone()); + let ledger = Self::ledger(account.clone()).or_else(|_| { + if StakingLedger::::is_bonded(account) { + Err(Error::::NotController.into()) + } else { + Err(Error::::NotStash.with_weight(T::WeightInfo::payout_stakers_alive_staked(0))) + } + })?; + + ledger.clone().update()?; + + let stash = ledger.stash.clone(); + + if Eras::::is_rewards_claimed(era, &stash, page) { + return Err(Error::::AlreadyClaimed + .with_weight(T::WeightInfo::payout_stakers_alive_staked(0))) + } + + Eras::::set_rewards_as_claimed(era, &stash, page); + + let exposure = Eras::::get_paged_exposure(era, &stash, page).ok_or_else(|| { + Error::::InvalidEraToReward + .with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) + })?; + + // Input data seems good, no errors allowed after this point + + // Get Era reward points. It has TOTAL and INDIVIDUAL + // Find the fraction of the era reward that belongs to the validator + // Take that fraction of the eras rewards to split to nominator and validator + // + // Then look at the validator, figure out the proportion of their reward + // which goes to them and each of their nominators. + + let era_reward_points = Eras::::get_reward_points(era); + let total_reward_points = era_reward_points.total; + let validator_reward_points = + era_reward_points.individual.get(&stash).copied().unwrap_or_else(Zero::zero); + + // Nothing to do if they have no reward points. + if validator_reward_points.is_zero() { + return Ok(Some(T::WeightInfo::payout_stakers_alive_staked(0)).into()) + } + + // This is the fraction of the total reward that the validator and the + // nominators will get. 
+ let validator_total_reward_part = + Perbill::from_rational(validator_reward_points, total_reward_points); + + // This is how much validator + nominators are entitled to. + let validator_total_payout = validator_total_reward_part * era_payout; + + let validator_commission = Eras::::get_validator_commission(era, &ledger.stash); + // total commission validator takes across all nominator pages + let validator_total_commission_payout = validator_commission * validator_total_payout; + + let validator_leftover_payout = + validator_total_payout.defensive_saturating_sub(validator_total_commission_payout); + // Now let's calculate how this is split to the validator. + let validator_exposure_part = Perbill::from_rational(exposure.own(), exposure.total()); + let validator_staking_payout = validator_exposure_part * validator_leftover_payout; + let page_stake_part = Perbill::from_rational(exposure.page_total(), exposure.total()); + // validator commission is paid out in fraction across pages proportional to the page stake. + let validator_commission_payout = page_stake_part * validator_total_commission_payout; + + Self::deposit_event(Event::::PayoutStarted { + era_index: era, + validator_stash: stash.clone(), + page, + next: Eras::::get_next_claimable_page(era, &stash), + }); + + let mut total_imbalance = PositiveImbalanceOf::::zero(); + // We can now make total validator payout: + if let Some((imbalance, dest)) = + Self::make_payout(&stash, validator_staking_payout + validator_commission_payout) + { + Self::deposit_event(Event::::Rewarded { stash, dest, amount: imbalance.peek() }); + total_imbalance.subsume(imbalance); + } + + // Track the number of payout ops to nominators. Note: + // `WeightInfo::payout_stakers_alive_staked` always assumes at least a validator is paid + // out, so we do not need to count their payout op. + let mut nominator_payout_count: u32 = 0; + + // Lets now calculate how this is split to the nominators. + // Reward only the clipped exposures. 
Note this is not necessarily sorted. + for nominator in exposure.others().iter() { + let nominator_exposure_part = Perbill::from_rational(nominator.value, exposure.total()); + + let nominator_reward: BalanceOf = + nominator_exposure_part * validator_leftover_payout; + // We can now make nominator payout: + if let Some((imbalance, dest)) = Self::make_payout(&nominator.who, nominator_reward) { + // Note: this logic does not count payouts for `RewardDestination::None`. + nominator_payout_count += 1; + let e = Event::::Rewarded { + stash: nominator.who.clone(), + dest, + amount: imbalance.peek(), + }; + Self::deposit_event(e); + total_imbalance.subsume(imbalance); + } + } + + T::Reward::on_unbalanced(total_imbalance); + debug_assert!(nominator_payout_count <= T::MaxExposurePageSize::get()); + + Ok(Some(T::WeightInfo::payout_stakers_alive_staked(nominator_payout_count)).into()) + } + + /// Chill a stash account. + pub(crate) fn chill_stash(stash: &T::AccountId) { + let chilled_as_validator = Self::do_remove_validator(stash); + let chilled_as_nominator = Self::do_remove_nominator(stash); + if chilled_as_validator || chilled_as_nominator { + Self::deposit_event(Event::::Chilled { stash: stash.clone() }); + } + } + + /// Actually make a payment to a staker. This uses the currency's reward function + /// to pay the right payee for the given staker account. 
+ fn make_payout( + stash: &T::AccountId, + amount: BalanceOf, + ) -> Option<(PositiveImbalanceOf, RewardDestination)> { + // noop if amount is zero + if amount.is_zero() { + return None + } + let dest = Self::payee(StakingAccount::Stash(stash.clone()))?; + + let maybe_imbalance = match dest { + RewardDestination::Stash => asset::mint_into_existing::(stash, amount), + RewardDestination::Staked => Self::ledger(Stash(stash.clone())) + .and_then(|mut ledger| { + ledger.active += amount; + ledger.total += amount; + let r = asset::mint_into_existing::(stash, amount); + + let _ = ledger + .update() + .defensive_proof("ledger fetched from storage, so it exists; qed."); + + Ok(r) + }) + .unwrap_or_default(), + RewardDestination::Account(ref dest_account) => + Some(asset::mint_creating::(&dest_account, amount)), + RewardDestination::None => None, + #[allow(deprecated)] + RewardDestination::Controller => Self::bonded(stash) + .map(|controller| { + defensive!("Paying out controller as reward destination which is deprecated and should be migrated."); + // This should never happen once payees with a `Controller` variant have been migrated. + // But if it does, just pay the controller account. + asset::mint_creating::(&controller, amount) + }), + }; + maybe_imbalance.map(|imbalance| (imbalance, dest)) + } + + /// Remove all associated data of a stash account from the staking system. + /// + /// Assumes storage is upgraded before calling. + /// + /// This is called: + /// - after a `withdraw_unbonded()` call that frees all of a stash's bonded balance. + /// - through `reap_stash()` if the balance has fallen to zero (through slashing). + pub(crate) fn kill_stash(stash: &T::AccountId, num_slashing_spans: u32) -> DispatchResult { + slashing::clear_stash_metadata::(&stash, num_slashing_spans)?; + + // removes controller from `Bonded` and staking ledger from `Ledger`, as well as reward + // setting of the stash in `Payee`. 
+ StakingLedger::::kill(&stash)?; + + Self::do_remove_validator(&stash); + Self::do_remove_nominator(&stash); + + Ok(()) + } + + #[cfg(test)] + pub(crate) fn reward_by_ids(validators_points: impl IntoIterator) { + Eras::::reward_active_era(validators_points) + } + + /// Helper to set a new `ForceEra` mode. + pub(crate) fn set_force_era(mode: Forcing) { + log!(info, "Setting force era mode {:?}.", mode); + ForceEra::::put(mode); + Self::deposit_event(Event::::ForceEra { mode }); + } + + #[cfg(feature = "runtime-benchmarks")] + pub fn add_era_stakers( + current_era: EraIndex, + stash: T::AccountId, + exposure: Exposure>, + ) { + Eras::::upsert_exposure(current_era, &stash, exposure); + } + + #[cfg(feature = "runtime-benchmarks")] + pub fn set_slash_reward_fraction(fraction: Perbill) { + SlashRewardFraction::::put(fraction); + } + + /// Get all the voters associated with `page` that are eligible for the npos election. + /// + /// `bounds` can impose a cap on the number of voters returned per page. + /// + /// Sets `MinimumActiveStake` to the minimum active nominator stake in the returned set of + /// nominators. + /// + /// Note: in the context of the multi-page snapshot, we expect the *order* of `VoterList` and + /// `TargetList` not to change while the pages are being processed. + pub(crate) fn get_npos_voters( + bounds: DataProviderBounds, + status: &SnapshotStatus, + ) -> Vec> { + let mut voters_size_tracker: StaticTracker = StaticTracker::default(); + + let page_len_prediction = { + let all_voter_count = T::VoterList::count(); + bounds.count.unwrap_or(all_voter_count.into()).min(all_voter_count.into()).0 + }; + + let mut all_voters = Vec::<_>::with_capacity(page_len_prediction as usize); + + // cache a few things. 
+ let weight_of = Self::weight_of_fn(); + + let mut voters_seen = 0u32; + let mut validators_taken = 0u32; + let mut nominators_taken = 0u32; + let mut min_active_stake = u64::MAX; + + let mut sorted_voters = match status { + // start the snapshot processing from the beginning. + SnapshotStatus::Waiting => T::VoterList::iter(), + // snapshot continues, start from the last iterated voter in the list. + SnapshotStatus::Ongoing(account_id) => T::VoterList::iter_from(&account_id) + .defensive_unwrap_or(Box::new(vec![].into_iter())), + // all voters have been consumed already, return an empty iterator. + SnapshotStatus::Consumed => Box::new(vec![].into_iter()), + }; + + while all_voters.len() < page_len_prediction as usize && + voters_seen < (NPOS_MAX_ITERATIONS_COEFFICIENT * page_len_prediction as u32) + { + let voter = match sorted_voters.next() { + Some(voter) => { + voters_seen.saturating_inc(); + voter + }, + None => break, + }; + + let voter_weight = weight_of(&voter); + // if voter weight is zero, do not consider this voter for the snapshot. + if voter_weight.is_zero() { + log!(debug, "voter's active balance is 0. skip this voter."); + continue + } + + if let Some(Nominations { targets, .. }) = >::get(&voter) { + if !targets.is_empty() { + // Note on lazy nomination quota: we do not check the nomination quota of the + // voter at this point and accept all the current nominations. The nomination + // quota is only enforced at `nominate` time. + + let voter = (voter, voter_weight, targets); + if voters_size_tracker.try_register_voter(&voter, &bounds).is_err() { + // no more space left for the election result, stop iterating. + Self::deposit_event(Event::::SnapshotVotersSizeExceeded { + size: voters_size_tracker.size as u32, + }); + break + } + + all_voters.push(voter); + nominators_taken.saturating_inc(); + } else { + defensive!("non-nominator fetched from voter list: {:?}", voter); + // technically should never happen, but not much we can do about it. 
+ } + min_active_stake = + if voter_weight < min_active_stake { voter_weight } else { min_active_stake }; + } else if Validators::::contains_key(&voter) { + // if this voter is a validator: + let self_vote = ( + voter.clone(), + voter_weight, + vec![voter.clone()] + .try_into() + .expect("`MaxVotesPerVoter` must be greater than or equal to 1"), + ); + + if voters_size_tracker.try_register_voter(&self_vote, &bounds).is_err() { + // no more space left for the election snapshot, stop iterating. + Self::deposit_event(Event::::SnapshotVotersSizeExceeded { + size: voters_size_tracker.size as u32, + }); + break + } + all_voters.push(self_vote); + validators_taken.saturating_inc(); + } else { + // this can only happen if: 1. there a bug in the bags-list (or whatever is the + // sorted list) logic and the state of the two pallets is no longer compatible, or + // because the nominators is not decodable since they have more nomination than + // `T::NominationsQuota::get_quota`. The latter can rarely happen, and is not + // really an emergency or bug if it does. + defensive!( + "invalid item in `VoterList`: {:?}, this nominator probably has too many nominations now", + voter, + ); + } + } + + // all_voters should have not re-allocated. + debug_assert!(all_voters.capacity() == page_len_prediction as usize); + + let min_active_stake: T::CurrencyBalance = + if all_voters.is_empty() { Zero::zero() } else { min_active_stake.into() }; + + MinimumActiveStake::::put(min_active_stake); + + all_voters + } + + /// Get all the targets associated are eligible for the npos election. + /// + /// The target snapshot is *always* single paged. + /// + /// This function is self-weighing as [`DispatchClass::Mandatory`]. 
+ pub fn get_npos_targets(bounds: DataProviderBounds) -> Vec { + let mut targets_size_tracker: StaticTracker = StaticTracker::default(); + + let final_predicted_len = { + let all_target_count = T::TargetList::count(); + bounds.count.unwrap_or(all_target_count.into()).min(all_target_count.into()).0 + }; + + let mut all_targets = Vec::::with_capacity(final_predicted_len as usize); + let mut targets_seen = 0; + + let mut targets_iter = T::TargetList::iter(); + while all_targets.len() < final_predicted_len as usize && + targets_seen < (NPOS_MAX_ITERATIONS_COEFFICIENT * final_predicted_len as u32) + { + let target = match targets_iter.next() { + Some(target) => { + targets_seen.saturating_inc(); + target + }, + None => break, + }; + + if targets_size_tracker.try_register_target(target.clone(), &bounds).is_err() { + // no more space left for the election snapshot, stop iterating. + log!(warn, "npos targets size exceeded, stopping iteration."); + Self::deposit_event(Event::::SnapshotTargetsSizeExceeded { + size: targets_size_tracker.size as u32, + }); + break + } + + if Validators::::contains_key(&target) { + all_targets.push(target); + } + } + + log!(info, "[bounds {:?}] generated {} npos targets", bounds, all_targets.len()); + + all_targets + } + + /// This function will add a nominator to the `Nominators` storage map, + /// and `VoterList`. + /// + /// If the nominator already exists, their nominations will be updated. + /// + /// NOTE: you must ALWAYS use this function to add nominator or update their targets. Any access + /// to `Nominators` or `VoterList` outside of this function is almost certainly + /// wrong. + pub fn do_add_nominator(who: &T::AccountId, nominations: Nominations) { + if !Nominators::::contains_key(who) { + // maybe update sorted list. 
+ let _ = T::VoterList::on_insert(who.clone(), Self::weight_of(who)) + .defensive_unwrap_or_default(); + } + Nominators::::insert(who, nominations); + + debug_assert_eq!( + Nominators::::count() + Validators::::count(), + T::VoterList::count() + ); + } + + /// This function will remove a nominator from the `Nominators` storage map, + /// and `VoterList`. + /// + /// Returns true if `who` was removed from `Nominators`, otherwise false. + /// + /// NOTE: you must ALWAYS use this function to remove a nominator from the system. Any access to + /// `Nominators` or `VoterList` outside of this function is almost certainly + /// wrong. + pub fn do_remove_nominator(who: &T::AccountId) -> bool { + let outcome = if Nominators::::contains_key(who) { + Nominators::::remove(who); + let _ = T::VoterList::on_remove(who); + true + } else { + false + }; + + debug_assert_eq!( + Nominators::::count() + Validators::::count(), + T::VoterList::count() + ); + + outcome + } + + /// This function will add a validator to the `Validators` storage map. + /// + /// If the validator already exists, their preferences will be updated. + /// + /// NOTE: you must ALWAYS use this function to add a validator to the system. Any access to + /// `Validators` or `VoterList` outside of this function is almost certainly + /// wrong. + pub fn do_add_validator(who: &T::AccountId, prefs: ValidatorPrefs) { + if !Validators::::contains_key(who) { + // maybe update sorted list. + let _ = T::VoterList::on_insert(who.clone(), Self::weight_of(who)); + } + Validators::::insert(who, prefs); + + debug_assert_eq!( + Nominators::::count() + Validators::::count(), + T::VoterList::count() + ); + } + + /// This function will remove a validator from the `Validators` storage map. + /// + /// Returns true if `who` was removed from `Validators`, otherwise false. + /// + /// NOTE: you must ALWAYS use this function to remove a validator from the system. 
Any access to + /// `Validators` or `VoterList` outside of this function is almost certainly + /// wrong. + pub fn do_remove_validator(who: &T::AccountId) -> bool { + let outcome = if Validators::::contains_key(who) { + Validators::::remove(who); + let _ = T::VoterList::on_remove(who); + true + } else { + false + }; + + debug_assert_eq!( + Nominators::::count() + Validators::::count(), + T::VoterList::count() + ); + + outcome + } + + /// Register some amount of weight directly with the system pallet. + /// + /// This is always mandatory weight. + pub(crate) fn register_weight(weight: Weight) { + >::register_extra_weight_unchecked( + weight, + DispatchClass::Mandatory, + ); + } + + /// Returns full exposure of a validator for a given era. + /// + /// History note: This used to be a getter for old storage item `ErasStakers` deprecated in v14 + /// and deleted in v17. Since this function is used in the codebase at various places, we kept + /// it as a custom getter that takes care of getting the full exposure of the validator in a + /// backward compatible way. + pub fn eras_stakers( + era: EraIndex, + account: &T::AccountId, + ) -> Exposure> { + Eras::::get_full_exposure(era, account) + } + + pub(super) fn do_migrate_currency(stash: &T::AccountId) -> DispatchResult { + if Self::is_virtual_staker(stash) { + return Self::do_migrate_virtual_staker(stash); + } + + let ledger = Self::ledger(Stash(stash.clone()))?; + let staked: BalanceOf = T::OldCurrency::balance_locked(STAKING_ID, stash).into(); + ensure!(!staked.is_zero(), Error::::AlreadyMigrated); + ensure!(ledger.total == staked, Error::::BadState); + + // remove old staking lock + T::OldCurrency::remove_lock(STAKING_ID, &stash); + + // check if we can hold all stake. + let max_hold = asset::free_to_stake::(&stash); + let force_withdraw = if max_hold >= staked { + // this means we can hold all stake. yay! 
+ asset::update_stake::(&stash, staked)?; + Zero::zero() + } else { + // if we are here, it means we cannot hold all user stake. We will do a force withdraw + // from ledger, but that's okay since anyways user do not have funds for it. + let force_withdraw = staked.saturating_sub(max_hold); + + // we ignore if active is 0. It implies the locked amount is not actively staked. The + // account can still get away from potential slash but we can't do much better here. + StakingLedger { + total: max_hold, + active: ledger.active.saturating_sub(force_withdraw), + // we are not changing the stash, so we can keep the stash. + ..ledger + } + .update()?; + force_withdraw + }; + + // Get rid of the extra consumer we used to have with OldCurrency. + frame_system::Pallet::::dec_consumers(&stash); + + Self::deposit_event(Event::::CurrencyMigrated { stash: stash.clone(), force_withdraw }); + Ok(()) + } + + fn do_migrate_virtual_staker(stash: &T::AccountId) -> DispatchResult { + // Funds for virtual stakers not managed/held by this pallet. We only need to clear + // the extra consumer we used to have with OldCurrency. + frame_system::Pallet::::dec_consumers(&stash); + + // The delegation system that manages the virtual staker needed to increment provider + // previously because of the consumer needed by this pallet. In reality, this stash + // is just a key for managing the ledger and the account does not need to hold any + // balance or exist. We decrement this provider. + let actual_providers = frame_system::Pallet::::providers(stash); + + let expected_providers = + // provider is expected to be 1 but someone can always transfer some free funds to + // these accounts, increasing the provider. + if asset::free_to_stake::(&stash) >= asset::existential_deposit::() { + 2 + } else { + 1 + }; + + // We should never have more than expected providers. 
+ ensure!(actual_providers <= expected_providers, Error::::BadState); + + // if actual provider is less than expected, it is already migrated. + ensure!(actual_providers == expected_providers, Error::::AlreadyMigrated); + + // dec provider + let _ = frame_system::Pallet::::dec_providers(&stash)?; + + return Ok(()) + } +} + +impl Pallet { + /// Returns the current nominations quota for nominators. + /// + /// Used by the runtime API. + pub fn api_nominations_quota(balance: BalanceOf) -> u32 { + T::NominationsQuota::get_quota(balance) + } + + pub fn api_eras_stakers( + era: EraIndex, + account: T::AccountId, + ) -> Exposure> { + Self::eras_stakers(era, &account) + } + + pub fn api_eras_stakers_page_count(era: EraIndex, account: T::AccountId) -> Page { + Eras::::exposure_page_count(era, &account) + } + + pub fn api_pending_rewards(era: EraIndex, account: T::AccountId) -> bool { + Eras::::pending_rewards(era, &account) + } +} + +impl ElectionDataProvider for Pallet { + type AccountId = T::AccountId; + type BlockNumber = BlockNumberFor; + type MaxVotesPerVoter = MaxNominationsOf; + + fn desired_targets() -> data_provider::Result { + Self::register_weight(T::DbWeight::get().reads(1)); + Ok(ValidatorCount::::get()) + } + + fn electing_voters( + bounds: DataProviderBounds, + page: PageIndex, + ) -> data_provider::Result>> { + let mut status = VoterSnapshotStatus::::get(); + let voters = Self::get_npos_voters(bounds, &status); + + // update the voter snapshot status. + match (page, &status) { + // last page, reset status for next round. 
+ (0, _) => status = SnapshotStatus::Waiting, + + (_, SnapshotStatus::Waiting) | (_, SnapshotStatus::Ongoing(_)) => { + let maybe_last = voters.last().map(|(x, _, _)| x).cloned(); + + if let Some(ref last) = maybe_last { + let has_next = + T::VoterList::iter_from(last).ok().and_then(|mut i| i.next()).is_some(); + if has_next { + status = SnapshotStatus::Ongoing(last.clone()); + } else { + status = SnapshotStatus::Consumed; + } + } + }, + // do nothing. + (_, SnapshotStatus::Consumed) => (), + } + + log!( + debug, + "[page {}, status {:?} (stake?: {:?}), bounds {:?}] generated {} npos voters", + page, + VoterSnapshotStatus::::get(), + if let SnapshotStatus::Ongoing(x) = VoterSnapshotStatus::::get() { + Self::weight_of(&x) + } else { + Zero::zero() + }, + bounds, + voters.len(), + ); + + match status { + SnapshotStatus::Ongoing(_) => T::VoterList::lock(), + _ => T::VoterList::unlock(), + } + + VoterSnapshotStatus::::put(status); + + debug_assert!(!bounds.slice_exhausted(&voters)); + + Ok(voters) + } + + fn electing_voters_stateless( + bounds: DataProviderBounds, + ) -> data_provider::Result>> { + let voters = Self::get_npos_voters(bounds, &SnapshotStatus::Waiting); + log!( + debug, + "[stateless, status {:?}, bounds {:?}] generated {} npos voters", + VoterSnapshotStatus::::get(), + bounds, + voters.len(), + ); + Ok(voters) + } + + fn electable_targets( + bounds: DataProviderBounds, + page: PageIndex, + ) -> data_provider::Result> { + if page > 0 { + log!(warn, "multi-page target snapshot not supported, returning page 0."); + } + + let targets = Self::get_npos_targets(bounds); + // We can't handle this case yet -- return an error. WIP to improve handling this case in + // . 
+ if bounds.exhausted(None, CountBound(targets.len() as u32).into()) { + return Err("Target snapshot too big") + } + + debug_assert!(!bounds.slice_exhausted(&targets)); + + Ok(targets) + } + + fn next_election_prediction(_: BlockNumberFor) -> BlockNumberFor { + debug_assert!(false, "this is deprecated and not used anymore"); + sp_runtime::traits::Bounded::max_value() + } + + #[cfg(feature = "runtime-benchmarks")] + fn fetch_page(page: PageIndex) { + session_rotation::EraElectionPlanner::::do_elect_paged(page); + } + + #[cfg(feature = "runtime-benchmarks")] + fn add_voter( + voter: T::AccountId, + weight: VoteWeight, + targets: BoundedVec, + ) { + let stake = >::try_from(weight).unwrap_or_else(|_| { + panic!("cannot convert a VoteWeight into BalanceOf, benchmark needs reconfiguring.") + }); + >::insert(voter.clone(), voter.clone()); + >::insert(voter.clone(), StakingLedger::::new(voter.clone(), stake)); + + Self::do_add_nominator(&voter, Nominations { targets, submitted_in: 0, suppressed: false }); + } + + #[cfg(feature = "runtime-benchmarks")] + fn add_target(target: T::AccountId) { + let stake = (MinValidatorBond::::get() + 1u32.into()) * 100u32.into(); + >::insert(target.clone(), target.clone()); + >::insert(target.clone(), StakingLedger::::new(target.clone(), stake)); + Self::do_add_validator( + &target, + ValidatorPrefs { commission: Perbill::zero(), blocked: false }, + ); + } + + #[cfg(feature = "runtime-benchmarks")] + fn clear() { + #[allow(deprecated)] + >::remove_all(None); + #[allow(deprecated)] + >::remove_all(None); + #[allow(deprecated)] + >::remove_all(); + #[allow(deprecated)] + >::remove_all(); + + T::VoterList::unsafe_clear(); + } + + #[cfg(feature = "runtime-benchmarks")] + fn put_snapshot( + voters: Vec>, + targets: Vec, + target_stake: Option, + ) { + targets.into_iter().for_each(|v| { + let stake: BalanceOf = target_stake + .and_then(|w| >::try_from(w).ok()) + .unwrap_or_else(|| MinNominatorBond::::get() * 100u32.into()); + >::insert(v.clone(), 
v.clone()); + >::insert(v.clone(), StakingLedger::::new(v.clone(), stake)); + Self::do_add_validator( + &v, + ValidatorPrefs { commission: Perbill::zero(), blocked: false }, + ); + }); + + voters.into_iter().for_each(|(v, s, t)| { + let stake = >::try_from(s).unwrap_or_else(|_| { + panic!("cannot convert a VoteWeight into BalanceOf, benchmark needs reconfiguring.") + }); + >::insert(v.clone(), v.clone()); + >::insert(v.clone(), StakingLedger::::new(v.clone(), stake)); + Self::do_add_nominator( + &v, + Nominations { targets: t, submitted_in: 0, suppressed: false }, + ); + }); + } + + #[cfg(feature = "runtime-benchmarks")] + fn set_desired_targets(count: u32) { + ValidatorCount::::put(count); + } +} + +impl rc_client::AHStakingInterface for Pallet { + type AccountId = T::AccountId; + type MaxValidatorSet = T::MaxValidatorSet; + + /// When we receive a session report from the relay chain, it kicks off the next session. + /// + /// There are three special types of things we can do in a session: + /// 1. Plan a new era: We do this one session before the expected era rotation. + /// 2. Kick off election: We do this based on the [`Config::PlanningEraOffset`] configuration. + /// 3. Activate Next Era: When we receive an activation timestamp in the session report, it + /// implies a new validator set has been applied, and we must increment the active era to keep + /// the systems in sync. 
+ fn on_relay_session_report(report: rc_client::SessionReport) { + log!(debug, "session report received\n{:?}", report,); + let consumed_weight = T::WeightInfo::rc_on_session_report(); + + let rc_client::SessionReport { + end_index, + activation_timestamp, + validator_points, + leftover, + } = report; + debug_assert!(!leftover); + + Eras::::reward_active_era(validator_points.into_iter()); + session_rotation::Rotator::::end_session(end_index, activation_timestamp); + // NOTE: we might want to either return these weights so that they are registered in the + // rc-client pallet, or directly benchmarked there, such that we can use them in the + // "pre-dispatch" fashion. That said, since these are all `Mandatory` weights, it doesn't + // make that big of a difference. + Self::register_weight(consumed_weight); + } + + fn on_new_offences( + slash_session: SessionIndex, + offences: Vec>, + ) { + log!(debug, "🦹 on_new_offences: {:?}", offences); + let consumed_weight = T::WeightInfo::rc_on_offence(offences.len() as u32); + + // Find the era to which offence belongs. + let Some(active_era) = ActiveEra::::get() else { + log!(warn, "🦹 on_new_offences: no active era; ignoring offence"); + return + }; + + let active_era_start_session = + ErasStartSessionIndex::::get(active_era.index).unwrap_or(0); + + // Fast path for active-era report - most likely. + // `slash_session` cannot be in a future active era. It must be in `active_era` or before. + let offence_era = if slash_session >= active_era_start_session { + active_era.index + } else { + match BondedEras::::get() + .iter() + // Reverse because it's more likely to find reports from recent eras. + .rev() + .find(|&(_, sesh)| sesh <= &slash_session) + .map(|(era, _)| *era) + { + Some(era) => era, + None => { + // defensive: this implies offence is for a discarded era, and should already be + // filtered out. 
+ log!(warn, "🦹 on_offence: no era found for slash_session; ignoring offence"); + return + }, + } + }; + + let invulnerables = Invulnerables::::get(); + + for o in offences { + let slash_fraction = o.slash_fraction; + let validator: ::AccountId = o.offender.into(); + // Skip if the validator is invulnerable. + if invulnerables.contains(&validator) { + log!(debug, "🦹 on_offence: {:?} is invulnerable; ignoring offence", validator); + continue + } + + let Some(exposure_overview) = >::get(&offence_era, &validator) + else { + // defensive: this implies offence is for a discarded era, and should already be + // filtered out. + log!( + warn, + "🦹 on_offence: no exposure found for {:?} in era {}; ignoring offence", + validator, + offence_era + ); + continue; + }; + + Self::deposit_event(Event::::OffenceReported { + validator: validator.clone(), + fraction: slash_fraction, + offence_era, + }); + + let prior_slash_fraction = ValidatorSlashInEra::::get(offence_era, &validator) + .map_or(Zero::zero(), |(f, _)| f); + + if let Some(existing) = OffenceQueue::::get(offence_era, &validator) { + if slash_fraction.deconstruct() > existing.slash_fraction.deconstruct() { + OffenceQueue::::insert( + offence_era, + &validator, + OffenceRecord { + reporter: o.reporters.first().cloned(), + reported_era: active_era.index, + slash_fraction, + ..existing + }, + ); + + // update the slash fraction in the `ValidatorSlashInEra` storage. 
+ ValidatorSlashInEra::::insert( + offence_era, + &validator, + (slash_fraction, exposure_overview.own), + ); + + log!( + debug, + "🦹 updated slash for {:?}: {:?} (prior: {:?})", + validator, + slash_fraction, + prior_slash_fraction, + ); + } else { + log!( + debug, + "🦹 ignored slash for {:?}: {:?} (existing prior is larger: {:?})", + validator, + slash_fraction, + prior_slash_fraction, + ); + } + } else if slash_fraction.deconstruct() > prior_slash_fraction.deconstruct() { + ValidatorSlashInEra::::insert( + offence_era, + &validator, + (slash_fraction, exposure_overview.own), + ); + + OffenceQueue::::insert( + offence_era, + &validator, + OffenceRecord { + reporter: o.reporters.first().cloned(), + reported_era: active_era.index, + // there are cases of validator with no exposure, hence 0 page, so we + // saturate to avoid underflow. + exposure_page: exposure_overview.page_count.saturating_sub(1), + slash_fraction, + prior_slash_fraction, + }, + ); + + OffenceQueueEras::::mutate(|q| { + if let Some(eras) = q { + log!(debug, "🦹 inserting offence era {} into existing queue", offence_era); + eras.binary_search(&offence_era).err().map(|idx| { + eras.try_insert(idx, offence_era).defensive_proof( + "Offence era must be present in the existing queue", + ) + }); + } else { + let mut eras = BoundedVec::default(); + log!(debug, "🦹 inserting offence era {} into empty queue", offence_era); + let _ = eras + .try_push(offence_era) + .defensive_proof("Failed to push offence era into empty queue"); + *q = Some(eras); + } + }); + + log!( + debug, + "🦹 queued slash for {:?}: {:?} (prior: {:?})", + validator, + slash_fraction, + prior_slash_fraction, + ); + } else { + log!( + debug, + "🦹 ignored slash for {:?}: {:?} (already slashed in era with prior: {:?})", + validator, + slash_fraction, + prior_slash_fraction, + ); + } + } + + Self::register_weight(consumed_weight); + } +} + +impl ScoreProvider for Pallet { + type Score = VoteWeight; + + fn score(who: &T::AccountId) -> Option { + 
Self::ledger(Stash(who.clone())) + .map(|l| l.active) + .map(|a| { + let issuance = asset::total_issuance::(); + T::CurrencyToVote::to_vote(a, issuance) + }) + .ok() + } + + #[cfg(feature = "runtime-benchmarks")] + fn set_score_of(who: &T::AccountId, weight: Self::Score) { + // this will clearly results in an inconsistent state, but it should not matter for a + // benchmark. + let active: BalanceOf = weight.try_into().map_err(|_| ()).unwrap(); + let mut ledger = match Self::ledger(StakingAccount::Stash(who.clone())) { + Ok(l) => l, + Err(_) => StakingLedger::default_from(who.clone()), + }; + ledger.active = active; + + >::insert(who, ledger); + >::insert(who, who); + + // also, we play a trick to make sure that a issuance based-`CurrencyToVote` behaves well: + // This will make sure that total issuance is zero, thus the currency to vote will be a 1-1 + // conversion. + let imbalance = asset::burn::(asset::total_issuance::()); + // kinda ugly, but gets the job done. The fact that this works here is a HUGE exception. + // Don't try this pattern in other places. + core::mem::forget(imbalance); + } +} + +/// A simple sorted list implementation that does not require any additional pallets. Note, this +/// does not provide validators in sorted order. If you desire nominators in a sorted order take +/// a look at [`pallet-bags-list`]. +pub struct UseValidatorsMap(core::marker::PhantomData); +impl SortedListProvider for UseValidatorsMap { + type Score = BalanceOf; + type Error = (); + + /// Returns iterator over voter list, which can have `take` called on it. 
+ fn iter() -> Box> { + Box::new(Validators::::iter().map(|(v, _)| v)) + } + fn iter_from( + start: &T::AccountId, + ) -> Result>, Self::Error> { + if Validators::::contains_key(start) { + let start_key = Validators::::hashed_key_for(start); + Ok(Box::new(Validators::::iter_from(start_key).map(|(n, _)| n))) + } else { + Err(()) + } + } + fn lock() {} + fn unlock() {} + fn count() -> u32 { + Validators::::count() + } + fn contains(id: &T::AccountId) -> bool { + Validators::::contains_key(id) + } + fn on_insert(_: T::AccountId, _weight: Self::Score) -> Result<(), Self::Error> { + // nothing to do on insert. + Ok(()) + } + fn get_score(id: &T::AccountId) -> Result { + Ok(Pallet::::weight_of(id).into()) + } + fn on_update(_: &T::AccountId, _weight: Self::Score) -> Result<(), Self::Error> { + // nothing to do on update. + Ok(()) + } + fn on_remove(_: &T::AccountId) -> Result<(), Self::Error> { + // nothing to do on remove. + Ok(()) + } + fn unsafe_regenerate( + _: impl IntoIterator, + _: Box Option>, + ) -> u32 { + // nothing to do upon regenerate. + 0 + } + #[cfg(feature = "try-runtime")] + fn try_state() -> Result<(), TryRuntimeError> { + Ok(()) + } + + fn unsafe_clear() { + #[allow(deprecated)] + Validators::::remove_all(); + } + + #[cfg(feature = "runtime-benchmarks")] + fn score_update_worst_case(_who: &T::AccountId, _is_increase: bool) -> Self::Score { + unimplemented!() + } +} + +/// A simple voter list implementation that does not require any additional pallets. Note, this +/// does not provided nominators in sorted ordered. If you desire nominators in a sorted order take +/// a look at [`pallet-bags-list]. 
+pub struct UseNominatorsAndValidatorsMap(core::marker::PhantomData); +impl SortedListProvider for UseNominatorsAndValidatorsMap { + type Error = (); + type Score = VoteWeight; + + fn iter() -> Box> { + Box::new( + Validators::::iter() + .map(|(v, _)| v) + .chain(Nominators::::iter().map(|(n, _)| n)), + ) + } + fn iter_from( + start: &T::AccountId, + ) -> Result>, Self::Error> { + if Validators::::contains_key(start) { + let start_key = Validators::::hashed_key_for(start); + Ok(Box::new( + Validators::::iter_from(start_key) + .map(|(n, _)| n) + .chain(Nominators::::iter().map(|(x, _)| x)), + )) + } else if Nominators::::contains_key(start) { + let start_key = Nominators::::hashed_key_for(start); + Ok(Box::new(Nominators::::iter_from(start_key).map(|(n, _)| n))) + } else { + Err(()) + } + } + fn lock() {} + fn unlock() {} + fn count() -> u32 { + Nominators::::count().saturating_add(Validators::::count()) + } + fn contains(id: &T::AccountId) -> bool { + Nominators::::contains_key(id) || Validators::::contains_key(id) + } + fn on_insert(_: T::AccountId, _weight: Self::Score) -> Result<(), Self::Error> { + // nothing to do on insert. + Ok(()) + } + fn get_score(id: &T::AccountId) -> Result { + Ok(Pallet::::weight_of(id)) + } + fn on_update(_: &T::AccountId, _weight: Self::Score) -> Result<(), Self::Error> { + // nothing to do on update. + Ok(()) + } + fn on_remove(_: &T::AccountId) -> Result<(), Self::Error> { + // nothing to do on remove. + Ok(()) + } + fn unsafe_regenerate( + _: impl IntoIterator, + _: Box Option>, + ) -> u32 { + // nothing to do upon regenerate. + 0 + } + + #[cfg(feature = "try-runtime")] + fn try_state() -> Result<(), TryRuntimeError> { + Ok(()) + } + + fn unsafe_clear() { + // NOTE: Caller must ensure this doesn't lead to too many storage accesses. This is a + // condition of SortedListProvider::unsafe_clear. 
+ #[allow(deprecated)] + Nominators::::remove_all(); + #[allow(deprecated)] + Validators::::remove_all(); + } + + #[cfg(feature = "runtime-benchmarks")] + fn score_update_worst_case(_who: &T::AccountId, _is_increase: bool) -> Self::Score { + unimplemented!() + } +} + +impl StakingInterface for Pallet { + type AccountId = T::AccountId; + type Balance = BalanceOf; + type CurrencyToVote = T::CurrencyToVote; + + fn minimum_nominator_bond() -> Self::Balance { + MinNominatorBond::::get() + } + + fn minimum_validator_bond() -> Self::Balance { + MinValidatorBond::::get() + } + + fn stash_by_ctrl(controller: &Self::AccountId) -> Result { + Self::ledger(Controller(controller.clone())) + .map(|l| l.stash) + .map_err(|e| e.into()) + } + + fn bonding_duration() -> EraIndex { + T::BondingDuration::get() + } + + fn current_era() -> EraIndex { + CurrentEra::::get().unwrap_or(Zero::zero()) + } + + fn stake(who: &Self::AccountId) -> Result>, DispatchError> { + Self::ledger(Stash(who.clone())) + .map(|l| Stake { total: l.total, active: l.active }) + .map_err(|e| e.into()) + } + + fn bond_extra(who: &Self::AccountId, extra: Self::Balance) -> DispatchResult { + Self::bond_extra(RawOrigin::Signed(who.clone()).into(), extra) + } + + fn unbond(who: &Self::AccountId, value: Self::Balance) -> DispatchResult { + let ctrl = Self::bonded(who).ok_or(Error::::NotStash)?; + Self::unbond(RawOrigin::Signed(ctrl).into(), value) + .map_err(|with_post| with_post.error) + .map(|_| ()) + } + + fn set_payee(stash: &Self::AccountId, reward_acc: &Self::AccountId) -> DispatchResult { + // Since virtual stakers are not allowed to compound their rewards as this pallet does not + // manage their locks, we do not allow reward account to be set same as stash. For + // external pallets that manage the virtual bond, they can claim rewards and re-bond them. 
+ ensure!( + !Self::is_virtual_staker(stash) || stash != reward_acc, + Error::::RewardDestinationRestricted + ); + + let ledger = Self::ledger(Stash(stash.clone()))?; + let _ = ledger + .set_payee(RewardDestination::Account(reward_acc.clone())) + .defensive_proof("ledger was retrieved from storage, thus its bonded; qed.")?; + + Ok(()) + } + + fn chill(who: &Self::AccountId) -> DispatchResult { + // defensive-only: any account bonded via this interface has the stash set as the + // controller, but we have to be sure. Same comment anywhere else that we read this. + let ctrl = Self::bonded(who).ok_or(Error::::NotStash)?; + Self::chill(RawOrigin::Signed(ctrl).into()) + } + + fn withdraw_unbonded( + who: Self::AccountId, + num_slashing_spans: u32, + ) -> Result { + let ctrl = Self::bonded(&who).ok_or(Error::::NotStash)?; + Self::withdraw_unbonded(RawOrigin::Signed(ctrl.clone()).into(), num_slashing_spans) + .map(|_| !StakingLedger::::is_bonded(StakingAccount::Controller(ctrl))) + .map_err(|with_post| with_post.error) + } + + fn bond( + who: &Self::AccountId, + value: Self::Balance, + payee: &Self::AccountId, + ) -> DispatchResult { + Self::bond( + RawOrigin::Signed(who.clone()).into(), + value, + RewardDestination::Account(payee.clone()), + ) + } + + fn nominate(who: &Self::AccountId, targets: Vec) -> DispatchResult { + let ctrl = Self::bonded(who).ok_or(Error::::NotStash)?; + let targets = targets.into_iter().map(T::Lookup::unlookup).collect::>(); + Self::nominate(RawOrigin::Signed(ctrl).into(), targets) + } + + fn desired_validator_count() -> u32 { + ValidatorCount::::get() + } + + fn election_ongoing() -> bool { + ::status().is_ok() + } + + fn force_unstake(who: Self::AccountId) -> sp_runtime::DispatchResult { + let num_slashing_spans = + SlashingSpans::::get(&who).map_or(0, |s| s.iter().count() as u32); + Self::force_unstake(RawOrigin::Root.into(), who.clone(), num_slashing_spans) + } + + fn is_exposed_in_era(who: &Self::AccountId, era: &EraIndex) -> bool { + 
ErasStakersPaged::::iter_prefix((era,)).any(|((validator, _), exposure_page)| { + validator == *who || exposure_page.others.iter().any(|i| i.who == *who) + }) + } + + fn status( + who: &Self::AccountId, + ) -> Result, DispatchError> { + if !StakingLedger::::is_bonded(StakingAccount::Stash(who.clone())) { + return Err(Error::::NotStash.into()) + } + + let is_validator = Validators::::contains_key(&who); + let is_nominator = Nominators::::get(&who); + + use sp_staking::StakerStatus; + match (is_validator, is_nominator.is_some()) { + (false, false) => Ok(StakerStatus::Idle), + (true, false) => Ok(StakerStatus::Validator), + (false, true) => Ok(StakerStatus::Nominator( + is_nominator.expect("is checked above; qed").targets.into_inner(), + )), + (true, true) => { + defensive!("cannot be both validators and nominator"); + Err(Error::::BadState.into()) + }, + } + } + + /// Whether `who` is a virtual staker whose funds are managed by another pallet. + /// + /// There is an assumption that, this account is keyless and managed by another pallet in the + /// runtime. Hence, it can never sign its own transactions. + fn is_virtual_staker(who: &T::AccountId) -> bool { + frame_system::Pallet::::account_nonce(who).is_zero() && + VirtualStakers::::contains_key(who) + } + + fn slash_reward_fraction() -> Perbill { + SlashRewardFraction::::get() + } + + sp_staking::runtime_benchmarks_enabled! 
{ + fn nominations(who: &Self::AccountId) -> Option> { + Nominators::::get(who).map(|n| n.targets.into_inner()) + } + + fn add_era_stakers( + current_era: &EraIndex, + stash: &T::AccountId, + exposures: Vec<(Self::AccountId, Self::Balance)>, + ) { + let others = exposures + .iter() + .map(|(who, value)| crate::IndividualExposure { who: who.clone(), value: *value }) + .collect::>(); + let exposure = Exposure { total: Default::default(), own: Default::default(), others }; + Eras::::upsert_exposure(*current_era, stash, exposure); + } + + fn set_current_era(era: EraIndex) { + CurrentEra::::put(era); + } + + fn max_exposure_page_size() -> Page { + T::MaxExposurePageSize::get() + } + } +} + +impl sp_staking::StakingUnchecked for Pallet { + fn migrate_to_virtual_staker(who: &Self::AccountId) -> DispatchResult { + asset::kill_stake::(who)?; + VirtualStakers::::insert(who, ()); + Ok(()) + } + + /// Virtually bonds `keyless_who` to `payee` with `value`. + /// + /// The payee must not be the same as the `keyless_who`. + fn virtual_bond( + keyless_who: &Self::AccountId, + value: Self::Balance, + payee: &Self::AccountId, + ) -> DispatchResult { + if StakingLedger::::is_bonded(StakingAccount::Stash(keyless_who.clone())) { + return Err(Error::::AlreadyBonded.into()) + } + + // check if payee not same as who. + ensure!(keyless_who != payee, Error::::RewardDestinationRestricted); + + // mark who as a virtual staker. + VirtualStakers::::insert(keyless_who, ()); + + Self::deposit_event(Event::::Bonded { stash: keyless_who.clone(), amount: value }); + let ledger = StakingLedger::::new(keyless_who.clone(), value); + + ledger.bond(RewardDestination::Account(payee.clone()))?; + + Ok(()) + } + + /// Only meant to be used in tests. 
+ #[cfg(feature = "runtime-benchmarks")] + fn migrate_to_direct_staker(who: &Self::AccountId) { + assert!(VirtualStakers::::contains_key(who)); + let ledger = StakingLedger::::get(Stash(who.clone())).unwrap(); + let _ = asset::update_stake::(who, ledger.total) + .expect("funds must be transferred to stash"); + VirtualStakers::::remove(who); + } +} + +#[cfg(any(test, feature = "try-runtime"))] +impl Pallet { + pub(crate) fn do_try_state(_now: BlockNumberFor) -> Result<(), TryRuntimeError> { + session_rotation::Rotator::::do_try_state()?; + session_rotation::Eras::::do_try_state()?; + Self::check_ledgers()?; + Self::check_bonded_consistency()?; + Self::check_payees()?; + Self::check_paged_exposures()?; + Self::check_count()?; + + Ok(()) + } + + /// Invariants: + /// * A controller should not be associated with more than one ledger. + /// * A bonded (stash, controller) pair should have only one associated ledger. I.e. if the + /// ledger is bonded by stash, the controller account must not bond a different ledger. + /// * A bonded (stash, controller) pair must have an associated ledger. + /// + /// NOTE: these checks result in warnings only. Once + /// is resolved, turn warns into check + /// failures. + fn check_bonded_consistency() -> Result<(), TryRuntimeError> { + use alloc::collections::btree_set::BTreeSet; + + let mut count_controller_double = 0; + let mut count_double = 0; + let mut count_none = 0; + // sanity check to ensure that each controller in Bonded storage is associated with only one + // ledger. + let mut controllers = BTreeSet::new(); + + for (stash, controller) in >::iter() { + if !controllers.insert(controller.clone()) { + count_controller_double += 1; + } + + match (>::get(&stash), >::get(&controller)) { + (Some(_), Some(_)) => + // if stash == controller, it means that the ledger has migrated to + // post-controller. If no migration happened, we expect that the (stash, + // controller) pair has only one associated ledger. 
+ if stash != controller { + count_double += 1; + }, + (None, None) => { + count_none += 1; + }, + _ => {}, + }; + } + + if count_controller_double != 0 { + log!( + warn, + "a controller is associated with more than one ledger ({} occurrences)", + count_controller_double + ); + }; + + if count_double != 0 { + log!(warn, "single tuple of (stash, controller) pair bonds more than one ledger ({} occurrences)", count_double); + } + + if count_none != 0 { + log!(warn, "inconsistent bonded state: (stash, controller) pair missing associated ledger ({} occurrences)", count_none); + } + + Ok(()) + } + + /// Invariants: + /// * A bonded ledger should always have an assigned `Payee`. + /// * The number of entries in `Payee` and of bonded staking ledgers *must* match. + /// * The stash account in the ledger must match that of the bonded account. + fn check_payees() -> Result<(), TryRuntimeError> { + for (stash, _) in Bonded::::iter() { + ensure!(Payee::::get(&stash).is_some(), "bonded ledger does not have payee set"); + } + + ensure!( + (Ledger::::iter().count() == Payee::::iter().count()) && + (Ledger::::iter().count() == Bonded::::iter().count()), + "number of entries in payee storage items does not match the number of bonded ledgers", + ); + + Ok(()) + } + + /// Invariants: + /// * Number of voters in `VoterList` match that of the number of Nominators and Validators in + /// the system (validator is both voter and target). + /// * Number of targets in `TargetList` matches the number of validators in the system. + /// * Current validator count is bounded by the election provider's max winners. 
+ fn check_count() -> Result<(), TryRuntimeError> { + ensure!( + ::VoterList::count() == + Nominators::::count() + Validators::::count(), + "wrong external count" + ); + ensure!( + ::TargetList::count() == Validators::::count(), + "wrong external count" + ); + let max_validators_bound = crate::MaxWinnersOf::::get(); + let max_winners_per_page_bound = crate::MaxWinnersPerPageOf::::get(); + ensure!( + max_validators_bound >= max_winners_per_page_bound, + "max validators should be higher than per page bounds" + ); + ensure!(ValidatorCount::::get() <= max_validators_bound, Error::::TooManyValidators); + Ok(()) + } + + /// Invariants: + /// * Stake consistency: ledger.total == ledger.active + sum(ledger.unlocking). + /// * The ledger's controller and stash matches the associated `Bonded` tuple. + /// * Staking locked funds for every bonded stash (non virtual stakers) should be the same as + /// its ledger's total. + /// * For virtual stakers, locked funds should be zero and payee should be non-stash account. + /// * Staking ledger and bond are not corrupted. + fn check_ledgers() -> Result<(), TryRuntimeError> { + Bonded::::iter() + .map(|(stash, ctrl)| { + // ensure locks consistency. 
+ if VirtualStakers::::contains_key(stash.clone()) { + ensure!( + asset::staked::(&stash) == Zero::zero(), + "virtual stakers should not have any staked balance" + ); + ensure!( + >::get(stash.clone()).unwrap() == stash.clone(), + "stash and controller should be same" + ); + ensure!( + Ledger::::get(stash.clone()).unwrap().stash == stash, + "ledger corrupted for virtual staker" + ); + ensure!( + frame_system::Pallet::::account_nonce(&stash).is_zero(), + "virtual stakers are keyless and should not have any nonce" + ); + let reward_destination = >::get(stash.clone()).unwrap(); + if let RewardDestination::Account(payee) = reward_destination { + ensure!( + payee != stash.clone(), + "reward destination should not be same as stash for virtual staker" + ); + } else { + return Err(DispatchError::Other( + "reward destination must be of account variant for virtual staker", + )); + } + } else { + ensure!( + Self::inspect_bond_state(&stash) == Ok(LedgerIntegrityState::Ok), + "bond, ledger and/or staking hold inconsistent for a bonded stash." + ); + } + + // ensure ledger consistency. + Self::ensure_ledger_consistent(ctrl) + }) + .collect::, _>>()?; + Ok(()) + } + + /// Invariants: + /// * For each paged era exposed validator, check if the exposure total is sane (exposure.total + /// = exposure.own + exposure.own). + /// * Paged exposures metadata (`ErasStakersOverview`) matches the paged exposures state. + fn check_paged_exposures() -> Result<(), TryRuntimeError> { + use alloc::collections::btree_map::BTreeMap; + use sp_staking::PagedExposureMetadata; + + // Sanity check for the paged exposure of the active era. 
+ let mut exposures: BTreeMap>> = + BTreeMap::new(); + let era = ActiveEra::::get().unwrap().index; + let accumulator_default = PagedExposureMetadata { + total: Zero::zero(), + own: Zero::zero(), + nominator_count: 0, + page_count: 0, + }; + + ErasStakersPaged::::iter_prefix((era,)) + .map(|((validator, _page), expo)| { + ensure!( + expo.page_total == + expo.others.iter().map(|e| e.value).fold(Zero::zero(), |acc, x| acc + x), + "wrong total exposure for the page.", + ); + + let metadata = exposures.get(&validator).unwrap_or(&accumulator_default); + exposures.insert( + validator, + PagedExposureMetadata { + total: metadata.total + expo.page_total, + own: metadata.own, + nominator_count: metadata.nominator_count + expo.others.len() as u32, + page_count: metadata.page_count + 1, + }, + ); + + Ok(()) + }) + .collect::>()?; + + exposures + .iter() + .map(|(validator, metadata)| { + let actual_overview = ErasStakersOverview::::get(era, validator); + + ensure!(actual_overview.is_some(), "No overview found for a paged exposure"); + let actual_overview = actual_overview.unwrap(); + + ensure!( + actual_overview.total == metadata.total + actual_overview.own, + "Exposure metadata does not have correct total exposed stake." + ); + ensure!( + actual_overview.nominator_count == metadata.nominator_count, + "Exposure metadata does not have correct count of nominators." + ); + ensure!( + actual_overview.page_count == metadata.page_count, + "Exposure metadata does not have correct count of pages." + ); + + Ok(()) + }) + .collect::>() + } + + fn ensure_ledger_consistent(ctrl: T::AccountId) -> Result<(), TryRuntimeError> { + // ensures ledger.total == ledger.active + sum(ledger.unlocking). 
+ let ledger = Self::ledger(StakingAccount::Controller(ctrl.clone()))?; + + let real_total: BalanceOf = + ledger.unlocking.iter().fold(ledger.active, |a, c| a + c.value); + ensure!(real_total == ledger.total, "ledger.total corrupt"); + + Ok(()) + } +} diff --git a/substrate/frame/staking-async/src/pallet/mod.rs b/substrate/frame/staking-async/src/pallet/mod.rs new file mode 100644 index 0000000000000..408e0aa0886b7 --- /dev/null +++ b/substrate/frame/staking-async/src/pallet/mod.rs @@ -0,0 +1,2405 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! `pallet-staking-async`'s main `pallet` module. 
+ +use crate::{ + asset, slashing, weights::WeightInfo, AccountIdLookupOf, ActiveEraInfo, BalanceOf, EraPayout, + EraRewardPoints, ExposurePage, Forcing, LedgerIntegrityState, MaxNominationsOf, + NegativeImbalanceOf, Nominations, NominationsQuota, PositiveImbalanceOf, RewardDestination, + StakingLedger, UnappliedSlash, UnlockChunk, ValidatorPrefs, +}; +use alloc::{format, vec::Vec}; +use codec::Codec; +use frame_election_provider_support::{ElectionProvider, SortedListProvider, VoteWeight}; +use frame_support::{ + assert_ok, + pallet_prelude::*, + traits::{ + fungible::{ + hold::{Balanced as FunHoldBalanced, Mutate as FunHoldMutate}, + Mutate, Mutate as FunMutate, + }, + Contains, Defensive, DefensiveSaturating, EnsureOrigin, Get, InspectLockableCurrency, + Nothing, OnUnbalanced, + }, + weights::Weight, + BoundedBTreeSet, BoundedVec, +}; +use frame_system::{ensure_root, ensure_signed, pallet_prelude::*}; +pub use impls::*; +use rand::seq::SliceRandom; +use rand_chacha::{ + rand_core::{RngCore, SeedableRng}, + ChaChaRng, +}; +use sp_core::{sr25519::Pair as SrPair, Pair}; +use sp_runtime::{ + traits::{StaticLookup, Zero}, + ArithmeticError, Perbill, Percent, +}; +use sp_staking::{ + EraIndex, Page, SessionIndex, + StakingAccount::{self, Controller, Stash}, + StakingInterface, +}; + +mod impls; + +// The speculative number of spans are used as an input of the weight annotation of +// [`Call::unbond`], as the post dispatch weight may depend on the number of slashing span on the +// account which is not provided as an input. The value set should be conservative but sensible. +pub(crate) const SPECULATIVE_NUM_SPANS: u32 = 32; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use crate::{session_rotation, PagedExposureMetadata, SnapshotStatus}; + use codec::HasCompact; + use frame_election_provider_support::{ElectionDataProvider, PageIndex}; + + /// The in-code storage version. 
+ const STORAGE_VERSION: StorageVersion = StorageVersion::new(17); + + #[pallet::pallet] + #[pallet::storage_version(STORAGE_VERSION)] + pub struct Pallet(_); + + /// Possible operations on the configuration values of this pallet. + #[derive(TypeInfo, Debug, Clone, Encode, Decode, DecodeWithMemTracking, PartialEq)] + pub enum ConfigOp { + /// Don't change. + Noop, + /// Set the given value. + Set(T), + /// Remove from storage. + Remove, + } + + #[pallet::config(with_default)] + pub trait Config: frame_system::Config { + /// The old trait for staking balance. Deprecated and only used for migrating old ledgers. + #[pallet::no_default] + type OldCurrency: InspectLockableCurrency< + Self::AccountId, + Moment = BlockNumberFor, + Balance = Self::CurrencyBalance, + >; + + /// The staking balance. + #[pallet::no_default] + type Currency: FunHoldMutate< + Self::AccountId, + Reason = Self::RuntimeHoldReason, + Balance = Self::CurrencyBalance, + > + FunMutate + + FunHoldBalanced; + + /// Overarching hold reason. + #[pallet::no_default_bounds] + type RuntimeHoldReason: From; + + /// Just the `Currency::Balance` type; we have this item to allow us to constrain it to + /// `From`. + type CurrencyBalance: sp_runtime::traits::AtLeast32BitUnsigned + + codec::FullCodec + + DecodeWithMemTracking + + HasCompact + + Copy + + MaybeSerializeDeserialize + + core::fmt::Debug + + Default + + From + + TypeInfo + + Send + + Sync + + MaxEncodedLen; + + /// Convert a balance into a number used for election calculation. This must fit into a + /// `u64` but is allowed to be sensibly lossy. The `u64` is used to communicate with the + /// [`frame_election_provider_support`] crate which accepts u64 numbers and does operations + /// in 128. + /// Consequently, the backward convert is used convert the u128s from sp-elections back to a + /// [`BalanceOf`]. 
+ #[pallet::no_default_bounds] + type CurrencyToVote: sp_staking::currency_to_vote::CurrencyToVote>; + + /// Something that provides the election functionality. + #[pallet::no_default] + type ElectionProvider: ElectionProvider< + AccountId = Self::AccountId, + BlockNumber = BlockNumberFor, + // we only accept an election provider that has staking as data provider. + DataProvider = Pallet, + >; + + /// Something that defines the maximum number of nominations per nominator. + #[pallet::no_default_bounds] + type NominationsQuota: NominationsQuota>; + + /// Number of eras to keep in history. + /// + /// Following information is kept for eras in `[current_era - + /// HistoryDepth, current_era]`: `ErasValidatorPrefs`, `ErasValidatorReward`, + /// `ErasRewardPoints`, `ErasTotalStake`, `ErasStartSessionIndex`, `ErasClaimedRewards`, + /// `ErasStakersPaged`, `ErasStakersOverview`. + /// + /// Must be more than the number of eras delayed by session. + /// I.e. active era must always be in history. I.e. `active_era > + /// current_era - history_depth` must be guaranteed. + /// + /// If migrating an existing pallet from storage value to config value, + /// this should be set to same value or greater as in storage. + #[pallet::constant] + type HistoryDepth: Get; + + /// Tokens have been minted and are unused for validator-reward. + /// See [Era payout](./index.html#era-payout). + #[pallet::no_default_bounds] + type RewardRemainder: OnUnbalanced>; + + /// Handler for the unbalanced reduction when slashing a staker. + #[pallet::no_default_bounds] + type Slash: OnUnbalanced>; + + /// Handler for the unbalanced increment when rewarding a staker. + /// NOTE: in most cases, the implementation of `OnUnbalanced` should modify the total + /// issuance. + #[pallet::no_default_bounds] + type Reward: OnUnbalanced>; + + /// Number of sessions per era. 
+ #[pallet::constant] + type SessionsPerEra: Get; + + /// Number of sessions before the end of an era when the election for the next era will + /// start. + /// + /// - This determines how many sessions **before** the last session of the era the staking + /// election process should begin. + /// - The value is bounded between **1** (election starts at the beginning of the last + /// session) and `SessionsPerEra` (election starts at the beginning of the first session + /// of the era). + /// + /// ### Example: + /// - If `SessionsPerEra = 6` and `PlanningEraOffset = 1`, the election starts at the + /// beginning of session `6 - 1 = 5`. + /// - If `PlanningEraOffset = 6`, the election starts at the beginning of session `6 - 6 = + /// 0`, meaning it starts at the very beginning of the era. + #[pallet::constant] + type PlanningEraOffset: Get; + + /// Number of eras that staked funds must remain bonded for. + #[pallet::constant] + type BondingDuration: Get; + + /// Number of eras that slashes are deferred by, after computation. + /// + /// This should be less than the bonding duration. Set to 0 if slashes + /// should be applied immediately, without opportunity for intervention. + #[pallet::constant] + type SlashDeferDuration: Get; + + /// The origin which can manage less critical staking parameters that does not require root. + /// + /// Supported actions: (1) cancel deferred slash, (2) set minimum commission. + #[pallet::no_default] + type AdminOrigin: EnsureOrigin; + + /// The payout for validators and the system for the current era. + /// See [Era payout](./index.html#era-payout). + #[pallet::no_default] + type EraPayout: EraPayout>; + + /// The maximum size of each `T::ExposurePage`. + /// + /// An `ExposurePage` is weakly bounded to a maximum of `MaxExposurePageSize` + /// nominators. + /// + /// For older non-paged exposure, a reward payout was restricted to the top + /// `MaxExposurePageSize` nominators. 
This is to limit the i/o cost for the + /// nominator payout. + /// + /// Note: `MaxExposurePageSize` is used to bound `ErasClaimedRewards` and is unsafe to + /// reduce without handling it in a migration. + #[pallet::constant] + type MaxExposurePageSize: Get; + + /// The absolute maximum of winner validators this pallet should return. + /// + /// As this pallet supports multi-block election, the set of winner validators *per + /// election* is bounded by this type. + #[pallet::constant] + type MaxValidatorSet: Get; + + /// Something that provides a best-effort sorted list of voters aka electing nominators, + /// used for NPoS election. + /// + /// The changes to nominators are reported to this. Moreover, each validator's self-vote is + /// also reported as one independent vote. + /// + /// To keep the load off the chain as much as possible, changes made to the staked amount + /// via rewards and slashes are not reported and thus need to be manually fixed by the + /// staker. In case of `bags-list`, this always means using `rebag` and `putInFrontOf`. + /// + /// Invariant: what comes out of this list will always be a nominator. + #[pallet::no_default] + type VoterList: SortedListProvider; + + /// WIP: This is a noop as of now, the actual business logic that's described below is going + /// to be introduced in a follow-up PR. + /// + /// Something that provides a best-effort sorted list of targets aka electable validators, + /// used for NPoS election. + /// + /// The changes to the approval stake of each validator are reported to this. This means any + /// change to: + /// 1. The stake of any validator or nominator. + /// 2. The targets of any nominator + /// 3. The role of any staker (e.g. validator -> chilled, nominator -> validator, etc) + /// + /// Unlike `VoterList`, the values in this list are always kept up to date with reward and + /// slash as well, and thus represent the accurate approval stake of all account being + /// nominated by nominators. 
+ /// + /// Note that while at the time of nomination, all targets are checked to be real + /// validators, they can chill at any point, and their approval stakes will still be + /// recorded. This implies that what comes out of iterating this list MIGHT NOT BE AN ACTIVE + /// VALIDATOR. + #[pallet::no_default] + type TargetList: SortedListProvider>; + + /// The maximum number of `unlocking` chunks a [`StakingLedger`] can + /// have. Effectively determines how many unique eras a staker may be + /// unbonding in. + /// + /// Note: `MaxUnlockingChunks` is used as the upper bound for the + /// `BoundedVec` item `StakingLedger.unlocking`. Setting this value + /// lower than the existing value can lead to inconsistencies in the + /// `StakingLedger` and will need to be handled properly in a runtime + /// migration. The test `reducing_max_unlocking_chunks_abrupt` shows + /// this effect. + #[pallet::constant] + type MaxUnlockingChunks: Get; + + /// The maximum amount of controller accounts that can be deprecated in one call. + type MaxControllersInDeprecationBatch: Get; + + /// Something that listens to staking updates and performs actions based on the data it + /// receives. + /// + /// WARNING: this only reports slashing and withdraw events for the time being. + #[pallet::no_default_bounds] + type EventListeners: sp_staking::OnStakingUpdate>; + + /// Maximum number of invulnerable validators. + #[pallet::constant] + type MaxInvulnerables: Get; + + /// Maximum number of disabled validators. + #[pallet::constant] + type MaxDisabledValidators: Get; + + /// Interface to talk to the RC-Client pallet, possibly sending election results to the + /// relay chain. + #[pallet::no_default] + type RcClientInterface: pallet_staking_async_rc_client::RcClientInterface< + AccountId = Self::AccountId, + >; + + #[pallet::no_default_bounds] + /// Filter some accounts from participating in staking. 
+ /// + /// This is useful for example to blacklist an account that is participating in staking in + /// another way (such as pools). + type Filter: Contains; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; + } + + /// A reason for placing a hold on funds. + #[pallet::composite_enum] + pub enum HoldReason { + /// Funds on stake by a nominator or a validator. + #[codec(index = 0)] + Staking, + } + + /// Default implementations of [`DefaultConfig`], which can be used to implement [`Config`]. + pub mod config_preludes { + use super::*; + use frame_support::{derive_impl, parameter_types, traits::ConstU32}; + pub struct TestDefaultConfig; + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig, no_aggregated_types)] + impl frame_system::DefaultConfig for TestDefaultConfig {} + + parameter_types! { + pub const SessionsPerEra: SessionIndex = 3; + pub const BondingDuration: EraIndex = 3; + } + + #[frame_support::register_default_impl(TestDefaultConfig)] + impl DefaultConfig for TestDefaultConfig { + #[inject_runtime_type] + type RuntimeHoldReason = (); + type CurrencyBalance = u128; + type CurrencyToVote = (); + type NominationsQuota = crate::FixedNominationsQuota<16>; + type HistoryDepth = ConstU32<84>; + type RewardRemainder = (); + type Slash = (); + type Reward = (); + type SessionsPerEra = SessionsPerEra; + type BondingDuration = BondingDuration; + type PlanningEraOffset = ConstU32<1>; + type SlashDeferDuration = (); + type MaxExposurePageSize = ConstU32<64>; + type MaxUnlockingChunks = ConstU32<32>; + type MaxValidatorSet = ConstU32<100>; + type MaxControllersInDeprecationBatch = ConstU32<100>; + type MaxInvulnerables = ConstU32<20>; + type MaxDisabledValidators = ConstU32<100>; + type EventListeners = (); + type Filter = Nothing; + type WeightInfo = (); + } + } + + /// The ideal number of active validators. 
+ #[pallet::storage] + pub type ValidatorCount = StorageValue<_, u32, ValueQuery>; + + /// Any validators that may never be slashed or forcibly kicked. It's a Vec since they're + /// easy to initialize and the performance hit is minimal (we expect no more than four + /// invulnerables) and restricted to testnets. + #[pallet::storage] + pub type Invulnerables = + StorageValue<_, BoundedVec, ValueQuery>; + + /// Map from all locked "stash" accounts to the controller account. + /// + /// TWOX-NOTE: SAFE since `AccountId` is a secure hash. + #[pallet::storage] + pub type Bonded = StorageMap<_, Twox64Concat, T::AccountId, T::AccountId>; + + /// The minimum active bond to become and maintain the role of a nominator. + #[pallet::storage] + pub type MinNominatorBond = StorageValue<_, BalanceOf, ValueQuery>; + + /// The minimum active bond to become and maintain the role of a validator. + #[pallet::storage] + pub type MinValidatorBond = StorageValue<_, BalanceOf, ValueQuery>; + + /// The minimum active nominator stake of the last successful election. + #[pallet::storage] + pub type MinimumActiveStake = StorageValue<_, BalanceOf, ValueQuery>; + + /// The minimum amount of commission that validators can set. + /// + /// If set to `0`, no limit exists. + #[pallet::storage] + pub type MinCommission = StorageValue<_, Perbill, ValueQuery>; + + /// Map from all (unlocked) "controller" accounts to the info regarding the staking. + /// + /// Note: All the reads and mutations to this storage *MUST* be done through the methods exposed + /// by [`StakingLedger`] to ensure data and lock consistency. + #[pallet::storage] + pub type Ledger = StorageMap<_, Blake2_128Concat, T::AccountId, StakingLedger>; + + /// Where the reward payment should be made. Keyed by stash. + /// + /// TWOX-NOTE: SAFE since `AccountId` is a secure hash. 
+ #[pallet::storage] + pub type Payee = + StorageMap<_, Twox64Concat, T::AccountId, RewardDestination, OptionQuery>; + + /// The map from (wannabe) validator stash key to the preferences of that validator. + /// + /// TWOX-NOTE: SAFE since `AccountId` is a secure hash. + #[pallet::storage] + pub type Validators = + CountedStorageMap<_, Twox64Concat, T::AccountId, ValidatorPrefs, ValueQuery>; + + /// The maximum validator count before we stop allowing new validators to join. + /// + /// When this value is not set, no limits are enforced. + #[pallet::storage] + pub type MaxValidatorsCount = StorageValue<_, u32, OptionQuery>; + + /// The map from nominator stash key to their nomination preferences, namely the validators that + /// they wish to support. + /// + /// Note that the keys of this storage map might become non-decodable in case the + /// account's [`NominationsQuota::MaxNominations`] configuration is decreased. + /// In this rare case, these nominators + /// are still existent in storage, their key is correct and retrievable (i.e. `contains_key` + /// indicates that they exist), but their value cannot be decoded. Therefore, the non-decodable + /// nominators will effectively not-exist, until they re-submit their preferences such that it + /// is within the bounds of the newly set `Config::MaxNominations`. + /// + /// This implies that `::iter_keys().count()` and `::iter().count()` might return different + /// values for this map. Moreover, the main `::count()` is aligned with the former, namely the + /// number of keys that exist. + /// + /// Lastly, if any of the nominators become non-decodable, they can be chilled immediately via + /// [`Call::chill_other`] dispatchable by anyone. + /// + /// TWOX-NOTE: SAFE since `AccountId` is a secure hash. + #[pallet::storage] + pub type Nominators = + CountedStorageMap<_, Twox64Concat, T::AccountId, Nominations>; + + /// Stakers whose funds are managed by other pallets. 
+ /// + /// This pallet does not apply any locks on them, therefore they are only virtually bonded. They
+ /// are expected to be keyless accounts and hence should not be allowed to mutate their ledger
+ /// directly via this pallet. Instead, these accounts are managed by other pallets and accessed
+ /// via low level apis. We keep track of them to do minimal integrity checks.
+ #[pallet::storage]
+ pub type VirtualStakers = CountedStorageMap<_, Twox64Concat, T::AccountId, ()>;
+
+ /// The maximum nominator count before we stop allowing new nominators to join.
+ ///
+ /// When this value is not set, no limits are enforced.
+ #[pallet::storage]
+ pub type MaxNominatorsCount = StorageValue<_, u32, OptionQuery>;
+
+ // --- AUDIT NOTE: the following storage items should only be controlled by `Rotator`
+
+ /// The current planned era index.
+ ///
+ /// This is the latest planned era, depending on how the Session pallet queues the validator
+ /// set, it might be active or not.
+ #[pallet::storage]
+ pub type CurrentEra = StorageValue<_, EraIndex>;
+
+ /// The active era information, it holds index and start.
+ ///
+ /// The active era is the era being currently rewarded. Validator set of this era must be
+ /// equal to what is in RC's session pallet.
+ #[pallet::storage]
+ pub type ActiveEra = StorageValue<_, ActiveEraInfo>;
+
+ /// A mapping from still-bonded eras to the first session index of that era.
+ ///
+ /// Must contain information for eras for the range:
+ /// `[active_era - bonding_duration; active_era]`
+ #[pallet::storage]
+ #[pallet::unbounded]
+ pub(crate) type BondedEras =
+ StorageValue<_, Vec<(EraIndex, SessionIndex)>, ValueQuery>;
+
+ // --- AUDIT Note: end of storage items controlled by `Rotator`.
+
+ /// The session index at which the era started, for the last [`Config::HistoryDepth`] eras.
+ ///
+ /// Note: This tracks the STARTING session (i.e. session index when the era starts being
+ /// ACTIVE) for the eras in `[CurrentEra - HISTORY_DEPTH, CurrentEra]`.
+ #[pallet::storage]
+ pub type ErasStartSessionIndex = StorageMap<_, Twox64Concat, EraIndex, SessionIndex>;
+
+ /// Summary of validator exposure at a given era.
+ ///
+ /// This contains the total stake in support of the validator and their own stake. In addition,
+ /// it can also be used to get the number of nominators backing this validator and the number of
+ /// exposure pages they are divided into. The page count is useful to determine the number of
+ /// pages of rewards that needs to be claimed.
+ ///
+ /// This is keyed first by the era index to allow bulk deletion and then the stash account.
+ /// Should only be accessed through `Eras`.
+ ///
+ /// It is removed after [`Config::HistoryDepth`] eras.
+ /// If stakers haven't been set or have been removed then an empty overview is returned.
+ #[pallet::storage]
+ pub type ErasStakersOverview = StorageDoubleMap<
+ _,
+ Twox64Concat,
+ EraIndex,
+ Twox64Concat,
+ T::AccountId,
+ PagedExposureMetadata>,
+ OptionQuery,
+ >;
+
+ /// Paginated exposure of a validator at a given era.
+ ///
+ /// This is keyed first by the era index to allow bulk deletion, then stash account and finally
+ /// the page. Should only be accessed through `Eras`.
+ ///
+ /// This is cleared after [`Config::HistoryDepth`] eras.
+ #[pallet::storage]
+ #[pallet::unbounded]
+ pub type ErasStakersPaged = StorageNMap<
+ _,
+ (
+ NMapKey,
+ NMapKey,
+ NMapKey,
+ ),
+ ExposurePage>,
+ OptionQuery,
+ >;
+
+ /// History of claimed paged rewards by era and validator.
+ ///
+ /// This is keyed by era and validator stash which maps to the set of page indexes which have
+ /// been claimed.
+ ///
+ /// It is removed after [`Config::HistoryDepth`] eras.
+ #[pallet::storage]
+ #[pallet::unbounded]
+ pub type ErasClaimedRewards = StorageDoubleMap<
+ _,
+ Twox64Concat,
+ EraIndex,
+ Twox64Concat,
+ T::AccountId,
+ Vec,
+ ValueQuery,
+ >;
+
+ /// Exposure of validator at era with the preferences of validators.
+ /// + /// This is keyed first by the era index to allow bulk deletion and then the stash account.
+ ///
+ /// It is removed after [`Config::HistoryDepth`] eras.
+ // If prefs hasn't been set or has been removed then 0 commission is returned.
+ #[pallet::storage]
+ pub type ErasValidatorPrefs = StorageDoubleMap<
+ _,
+ Twox64Concat,
+ EraIndex,
+ Twox64Concat,
+ T::AccountId,
+ ValidatorPrefs,
+ ValueQuery,
+ >;
+
+ /// The total validator era payout for the last [`Config::HistoryDepth`] eras.
+ ///
+ /// Eras that haven't finished yet or have been removed don't have a reward.
+ #[pallet::storage]
+ pub type ErasValidatorReward = StorageMap<_, Twox64Concat, EraIndex, BalanceOf>;
+
+ /// Rewards for the last [`Config::HistoryDepth`] eras.
+ /// If reward hasn't been set or has been removed then 0 reward is returned.
+ #[pallet::storage]
+ #[pallet::unbounded]
+ pub type ErasRewardPoints =
+ StorageMap<_, Twox64Concat, EraIndex, EraRewardPoints, ValueQuery>;
+
+ /// The total amount staked for the last [`Config::HistoryDepth`] eras.
+ /// If total hasn't been set or has been removed then 0 stake is returned.
+ #[pallet::storage]
+ pub type ErasTotalStake =
+ StorageMap<_, Twox64Concat, EraIndex, BalanceOf, ValueQuery>;
+
+ /// Mode of era forcing.
+ #[pallet::storage]
+ pub type ForceEra = StorageValue<_, Forcing, ValueQuery>;
+
+ /// Maximum staked rewards, i.e. the percentage of the era inflation that
+ /// is used for stake rewards.
+ /// See [Era payout](./index.html#era-payout).
+ #[pallet::storage]
+ pub type MaxStakedRewards = StorageValue<_, Percent, OptionQuery>;
+
+ /// The percentage of the slash that is distributed to reporters.
+ ///
+ /// The rest of the slashed value is handled by the `Slash`.
+ #[pallet::storage]
+ pub type SlashRewardFraction = StorageValue<_, Perbill, ValueQuery>;
+
+ /// The amount of currency given to reporters of a slash event which was
+ /// canceled by extraordinary circumstances (e.g. governance).
+ #[pallet::storage] + pub type CanceledSlashPayout = StorageValue<_, BalanceOf, ValueQuery>; + + /// Stores reported offences in a queue until they are processed in subsequent blocks. + /// + /// Each offence is recorded under the corresponding era index and the offending validator's + /// account. If an offence spans multiple pages, only one page is processed at a time. Offences + /// are handled sequentially, with their associated slashes computed and stored in + /// `UnappliedSlashes`. These slashes are then applied in a future era as determined by + /// `SlashDeferDuration`. + /// + /// Any offences tied to an era older than `BondingDuration` are automatically dropped. + /// Processing always prioritizes the oldest era first. + #[pallet::storage] + pub type OffenceQueue = StorageDoubleMap< + _, + Twox64Concat, + EraIndex, + Twox64Concat, + T::AccountId, + slashing::OffenceRecord, + >; + + /// Tracks the eras that contain offences in `OffenceQueue`, sorted from **earliest to latest**. + /// + /// - This ensures efficient retrieval of the oldest offence without iterating through + /// `OffenceQueue`. + /// - When a new offence is added to `OffenceQueue`, its era is **inserted in sorted order** + /// if not already present. + /// - When all offences for an era are processed, it is **removed** from this list. + /// - The maximum length of this vector is bounded by `BondingDuration`. + /// + /// This eliminates the need for expensive iteration and sorting when fetching the next offence + /// to process. + #[pallet::storage] + pub type OffenceQueueEras = StorageValue<_, BoundedVec>; + + /// Tracks the currently processed offence record from the `OffenceQueue`. + /// + /// - When processing offences, an offence record is **popped** from the oldest era in + /// `OffenceQueue` and stored here. + /// - The function `process_offence` reads from this storage, processing one page of exposure at + /// a time. 
+ /// - After processing a page, the `exposure_page` count is **decremented** until it reaches + /// zero. + /// - Once fully processed, the offence record is removed from this storage. + /// + /// This ensures that offences are processed incrementally, preventing excessive computation + /// in a single block while maintaining correct slashing behavior. + #[pallet::storage] + pub type ProcessingOffence = + StorageValue<_, (EraIndex, T::AccountId, slashing::OffenceRecord)>; + + /// All unapplied slashes that are queued for later. + #[pallet::storage] + pub type UnappliedSlashes = StorageDoubleMap< + _, + Twox64Concat, + EraIndex, + Twox64Concat, + // Unique key for unapplied slashes: (validator, slash fraction, page index). + (T::AccountId, Perbill, u32), + UnappliedSlash, + OptionQuery, + >; + + /// All slashing events on validators, mapped by era to the highest slash proportion + /// and slash value of the era. + #[pallet::storage] + pub(crate) type ValidatorSlashInEra = StorageDoubleMap< + _, + Twox64Concat, + EraIndex, + Twox64Concat, + T::AccountId, + (Perbill, BalanceOf), + >; + + /// All slashing events on nominators, mapped by era to the highest slash value of the era. + #[pallet::storage] + pub(crate) type NominatorSlashInEra = + StorageDoubleMap<_, Twox64Concat, EraIndex, Twox64Concat, T::AccountId, BalanceOf>; + + /// Slashing spans for stash accounts. + #[pallet::storage] + #[pallet::unbounded] + pub type SlashingSpans = + StorageMap<_, Twox64Concat, T::AccountId, slashing::SlashingSpans>; + + /// Records information about the maximum slash of a stash within a slashing span, + /// as well as how much reward has been paid out. + #[pallet::storage] + pub(crate) type SpanSlash = StorageMap< + _, + Twox64Concat, + (T::AccountId, slashing::SpanIndex), + slashing::SpanRecord>, + ValueQuery, + >; + + /// The threshold for when users can start calling `chill_other` for other validators / + /// nominators. 
The threshold is compared to the actual number of validators / nominators + /// (`CountFor*`) in the system compared to the configured max (`Max*Count`). + #[pallet::storage] + pub(crate) type ChillThreshold = StorageValue<_, Percent, OptionQuery>; + + /// Voter snapshot progress status. + /// + /// If the status is `Ongoing`, it keeps a cursor of the last voter retrieved to proceed when + /// creating the next snapshot page. + #[pallet::storage] + pub(crate) type VoterSnapshotStatus = + StorageValue<_, SnapshotStatus, ValueQuery>; + + /// Keeps track of an ongoing multi-page election solution request. + /// + /// If `Some(_)``, it is the next page that we intend to elect. If `None`, we are not in the + /// election process. + /// + /// This is only set in multi-block elections. Should always be `None` otherwise. + #[pallet::storage] + pub(crate) type NextElectionPage = StorageValue<_, PageIndex, OptionQuery>; + + /// A bounded list of the "electable" stashes that resulted from a successful election. + #[pallet::storage] + pub(crate) type ElectableStashes = + StorageValue<_, BoundedBTreeSet, ValueQuery>; + + #[pallet::genesis_config] + #[derive(frame_support::DefaultNoBound, frame_support::DebugNoBound)] + pub struct GenesisConfig { + pub validator_count: u32, + pub invulnerables: BoundedVec, + pub force_era: Forcing, + pub slash_reward_fraction: Perbill, + pub canceled_payout: BalanceOf, + pub stakers: Vec<(T::AccountId, BalanceOf, crate::StakerStatus)>, + pub min_nominator_bond: BalanceOf, + pub min_validator_bond: BalanceOf, + pub max_validator_count: Option, + pub max_nominator_count: Option, + /// Create the given number of validators and nominators. + /// + /// These account need not be in the endowment list of balances, and are auto-topped up + /// here. + /// + /// Useful for testing genesis config. + pub dev_stakers: Option<(u32, u32)>, + /// initial active era, corresponding session index and start timestamp. 
+ pub active_era: (u32, u32, u64), + } + + impl GenesisConfig { + fn generate_endowed_bonded_account(derivation: &str, rng: &mut ChaChaRng) -> T::AccountId { + let pair: SrPair = Pair::from_string(&derivation, None) + .expect(&format!("Failed to parse derivation string: {derivation}")); + let who = T::AccountId::decode(&mut &pair.public().encode()[..]) + .expect(&format!("Failed to decode public key from pair: {:?}", pair.public())); + + let (min, max) = T::VoterList::range(); + let stake = BalanceOf::::from(rng.next_u64().min(max).max(min)); + let two: BalanceOf = 2u32.into(); + + assert_ok!(T::Currency::mint_into(&who, stake * two)); + assert_ok!(>::bond( + T::RuntimeOrigin::from(Some(who.clone()).into()), + stake, + RewardDestination::Staked, + )); + who + } + } + + #[pallet::genesis_build] + impl BuildGenesisConfig for GenesisConfig { + fn build(&self) { + crate::log!(trace, "initializing with {:?}", self); + ValidatorCount::::put(self.validator_count); + assert!( + self.invulnerables.len() as u32 <= T::MaxInvulnerables::get(), + "Too many invulnerable validators at genesis." + ); + >::put(&self.invulnerables); + ForceEra::::put(self.force_era); + CanceledSlashPayout::::put(self.canceled_payout); + SlashRewardFraction::::put(self.slash_reward_fraction); + MinNominatorBond::::put(self.min_nominator_bond); + MinValidatorBond::::put(self.min_validator_bond); + if let Some(x) = self.max_validator_count { + MaxValidatorsCount::::put(x); + } + if let Some(x) = self.max_nominator_count { + MaxNominatorsCount::::put(x); + } + + for &(ref stash, balance, ref status) in &self.stakers { + crate::log!( + trace, + "inserting genesis staker: {:?} => {:?} => {:?}", + stash, + balance, + status + ); + assert!( + asset::free_to_stake::(stash) >= balance, + "Stash does not have enough balance to bond." 
+ ); + assert_ok!(>::bond( + T::RuntimeOrigin::from(Some(stash.clone()).into()), + balance, + RewardDestination::Staked, + )); + assert_ok!(match status { + crate::StakerStatus::Validator => >::validate( + T::RuntimeOrigin::from(Some(stash.clone()).into()), + Default::default(), + ), + crate::StakerStatus::Nominator(votes) => >::nominate( + T::RuntimeOrigin::from(Some(stash.clone()).into()), + votes.iter().map(|l| T::Lookup::unlookup(l.clone())).collect(), + ), + _ => Ok(()), + }); + assert!( + ValidatorCount::::get() <= + ::MaxWinnersPerPage::get() * + ::Pages::get() + ); + } + + // all voters are reported to the `VoterList`. + assert_eq!( + T::VoterList::count(), + Nominators::::count() + Validators::::count(), + "not all genesis stakers were inserted into sorted list provider, something is wrong." + ); + + // now generate the dev stakers, after all else is setup + if let Some((validators, nominators)) = self.dev_stakers { + crate::log!( + debug, + "generating dev stakers: validators: {}, nominators: {}", + validators, + nominators + ); + let base_derivation = "//staker//{}"; + + // it is okay for the randomness to be the same on every call. If we want different, + // we can make `base_derivation` configurable. 
+ let mut rng = + ChaChaRng::from_seed(base_derivation.using_encoded(sp_core::blake2_256)); + + let validators = (0..validators) + .map(|index| { + let derivation = + base_derivation.replace("{}", &format!("validator{}", index)); + let who = Self::generate_endowed_bonded_account(&derivation, &mut rng); + assert_ok!(>::validate( + T::RuntimeOrigin::from(Some(who.clone()).into()), + Default::default(), + )); + who + }) + .collect::>(); + + (0..nominators).for_each(|index| { + let derivation = base_derivation.replace("{}", &format!("nominator{}", index)); + let who = Self::generate_endowed_bonded_account(&derivation, &mut rng); + + let random_nominations = validators + .choose_multiple(&mut rng, MaxNominationsOf::::get() as usize) + .map(|v| v.clone()) + .collect::>(); + + assert_ok!(>::nominate( + T::RuntimeOrigin::from(Some(who.clone()).into()), + random_nominations.iter().map(|l| T::Lookup::unlookup(l.clone())).collect(), + )); + }) + } + + let (active_era, session_index, timestamp) = self.active_era; + ActiveEra::::put(ActiveEraInfo { index: active_era, start: Some(timestamp) }); + // at genesis, we do not have any new planned era. + CurrentEra::::put(active_era); + ErasStartSessionIndex::::insert(active_era, session_index); + } + } + + #[pallet::event] + #[pallet::generate_deposit(pub(crate) fn deposit_event)] + pub enum Event { + /// The era payout has been set; the first balance is the validator-payout; the second is + /// the remainder from the maximum amount of reward. + EraPaid { + era_index: EraIndex, + validator_payout: BalanceOf, + remainder: BalanceOf, + }, + /// The nominator has been rewarded by this amount to this destination. + Rewarded { + stash: T::AccountId, + dest: RewardDestination, + amount: BalanceOf, + }, + /// A staker (validator or nominator) has been slashed by the given amount. + Slashed { + staker: T::AccountId, + amount: BalanceOf, + }, + /// An old slashing report from a prior era was discarded because it could + /// not be processed. 
+ OldSlashingReportDiscarded { + session_index: SessionIndex, + }, + /// An account has bonded this amount. \[stash, amount\] + /// + /// NOTE: This event is only emitted when funds are bonded via a dispatchable. Notably, + /// it will not be emitted for staking rewards when they are added to stake. + Bonded { + stash: T::AccountId, + amount: BalanceOf, + }, + /// An account has unbonded this amount. + Unbonded { + stash: T::AccountId, + amount: BalanceOf, + }, + /// An account has called `withdraw_unbonded` and removed unbonding chunks worth `Balance` + /// from the unlocking queue. + Withdrawn { + stash: T::AccountId, + amount: BalanceOf, + }, + /// A subsequent event of `Withdrawn`, indicating that `stash` was fully removed from the + /// system. + StakerRemoved { + stash: T::AccountId, + }, + /// A nominator has been kicked from a validator. + Kicked { + nominator: T::AccountId, + stash: T::AccountId, + }, + /// An account has stopped participating as either a validator or nominator. + Chilled { + stash: T::AccountId, + }, + /// A Page of stakers rewards are getting paid. `next` is `None` if all pages are claimed. + PayoutStarted { + era_index: EraIndex, + validator_stash: T::AccountId, + page: Page, + next: Option, + }, + /// A validator has set their preferences. + ValidatorPrefsSet { + stash: T::AccountId, + prefs: ValidatorPrefs, + }, + /// Voters size limit reached. + SnapshotVotersSizeExceeded { + size: u32, + }, + /// Targets size limit reached. + SnapshotTargetsSizeExceeded { + size: u32, + }, + ForceEra { + mode: Forcing, + }, + /// Report of a controller batch deprecation. + ControllerBatchDeprecated { + failures: u32, + }, + /// Staking balance migrated from locks to holds, with any balance that could not be held + /// is force withdrawn. + CurrencyMigrated { + stash: T::AccountId, + force_withdraw: BalanceOf, + }, + /// A page from a multi-page election was fetched. A number of these are followed by + /// `StakersElected`. 
+ /// + /// `Ok(count)` indicates the given number of stashes were added.
+ /// `Err(index)` indicates that the stashes after index were dropped.
+ /// `Err(0)` indicates that an error happened but no stashes were dropped nor added.
+ ///
+ /// The error indicates that a number of validators were dropped due to excess size, but
+ /// the overall election will continue.
+ PagedElectionProceeded {
+ page: PageIndex,
+ result: Result,
+ },
+ /// An offence for the given validator, for the given percentage of their stake, at the
+ /// given era has been reported.
+ OffenceReported {
+ offence_era: EraIndex,
+ validator: T::AccountId,
+ fraction: Perbill,
+ },
+ /// An offence has been processed and the corresponding slash has been computed.
+ SlashComputed {
+ offence_era: EraIndex,
+ slash_era: EraIndex,
+ offender: T::AccountId,
+ page: u32,
+ },
+ /// An unapplied slash has been cancelled.
+ SlashCancelled {
+ slash_era: EraIndex,
+ slash_key: (T::AccountId, Perbill, u32),
+ payout: BalanceOf,
+ },
+ /// Session change has been triggered.
+ ///
+ /// If planned_era is one era ahead of active_era, it implies a new era is being planned
+ /// and election is ongoing.
+ SessionRotated {
+ starting_session: SessionIndex,
+ active_era: EraIndex,
+ planned_era: EraIndex,
+ },
+ }
+
+ #[pallet::error]
+ #[derive(PartialEq)]
+ pub enum Error {
+ /// Not a controller account.
+ NotController,
+ /// Not a stash account.
+ NotStash,
+ /// Stash is already bonded.
+ AlreadyBonded,
+ /// Controller is already paired.
+ AlreadyPaired,
+ /// Targets cannot be empty.
+ EmptyTargets,
+ /// Duplicate index.
+ DuplicateIndex,
+ /// Slash record not found.
+ InvalidSlashRecord,
+ /// Cannot have a validator or nominator role, with value less than the minimum defined by
+ /// governance (see `MinValidatorBond` and `MinNominatorBond`). If unbonding is the
+ /// intention, `chill` first to remove one's role as validator/nominator.
+ InsufficientBond,
+ /// Can not schedule more unlock chunks.
+ NoMoreChunks, + /// Can not rebond without unlocking chunks. + NoUnlockChunk, + /// Attempting to target a stash that still has funds. + FundedTarget, + /// Invalid era to reward. + InvalidEraToReward, + /// Invalid number of nominations. + InvalidNumberOfNominations, + /// Rewards for this era have already been claimed for this validator. + AlreadyClaimed, + /// No nominators exist on this page. + InvalidPage, + /// Incorrect previous history depth input provided. + IncorrectHistoryDepth, + /// Incorrect number of slashing spans provided. + IncorrectSlashingSpans, + /// Internal state has become somehow corrupted and the operation cannot continue. + BadState, + /// Too many nomination targets supplied. + TooManyTargets, + /// A nomination target was supplied that was blocked or otherwise not a validator. + BadTarget, + /// The user has enough bond and thus cannot be chilled forcefully by an external person. + CannotChillOther, + /// There are too many nominators in the system. Governance needs to adjust the staking + /// settings to keep things safe for the runtime. + TooManyNominators, + /// There are too many validator candidates in the system. Governance needs to adjust the + /// staking settings to keep things safe for the runtime. + TooManyValidators, + /// Commission is too low. Must be at least `MinCommission`. + CommissionTooLow, + /// Some bound is not met. + BoundNotMet, + /// Used when attempting to use deprecated controller account logic. + ControllerDeprecated, + /// Cannot reset a ledger. + CannotRestoreLedger, + /// Provided reward destination is not allowed. + RewardDestinationRestricted, + /// Not enough funds available to withdraw. + NotEnoughFunds, + /// Operation not allowed for virtual stakers. + VirtualStakerNotAllowed, + /// Stash could not be reaped as other pallet might depend on it. + CannotReapStash, + /// The stake of this account is already migrated to `Fungible` holds. + AlreadyMigrated, + /// Era not yet started. 
+ EraNotStarted, + /// Account is restricted from participation in staking. This may happen if the account is + /// staking in another way already, such as via pool. + Restricted, + } + + impl Pallet { + /// Apply previously-unapplied slashes on the beginning of a new era, after a delay. + pub(crate) fn apply_unapplied_slashes(active_era: EraIndex) -> Weight { + let mut slashes = UnappliedSlashes::::iter_prefix(&active_era).take(1); + if let Some((key, slash)) = slashes.next() { + crate::log!( + debug, + "🦹 found slash {:?} scheduled to be executed in era {:?}", + slash, + active_era, + ); + let offence_era = active_era.saturating_sub(T::SlashDeferDuration::get()); + slashing::apply_slash::(slash, offence_era); + // remove the slash + UnappliedSlashes::::remove(&active_era, &key); + T::WeightInfo::apply_slash() + } else { + T::DbWeight::get().reads(1) + } + } + } + + #[pallet::hooks] + impl Hooks> for Pallet { + fn on_initialize(_now: BlockNumberFor) -> Weight { + // process our queue. + let mut consumed_weight = slashing::process_offence::(); + + // apply any pending slashes after `SlashDeferDuration`. + consumed_weight.saturating_accrue(T::DbWeight::get().reads(1)); + if let Some(active_era) = ActiveEra::::get() { + let slash_weight = Self::apply_unapplied_slashes(active_era.index); + consumed_weight.saturating_accrue(slash_weight); + } + + // maybe plan eras and stuff. Note that this is benchmark as a part of the + // election-provider's benchmarks. + session_rotation::EraElectionPlanner::::maybe_fetch_election_results(); + consumed_weight + } + + fn integrity_test() { + // ensure that we funnel the correct value to the `DataProvider::MaxVotesPerVoter`; + assert_eq!( + MaxNominationsOf::::get(), + ::MaxVotesPerVoter::get() + ); + // and that MaxNominations is always greater than 1, since we count on this. 
+ assert!(!MaxNominationsOf::::get().is_zero()); + + assert!( + T::SlashDeferDuration::get() < T::BondingDuration::get() || T::BondingDuration::get() == 0, + "As per documentation, slash defer duration ({}) should be less than bonding duration ({}).", + T::SlashDeferDuration::get(), + T::BondingDuration::get(), + ); + } + + #[cfg(feature = "try-runtime")] + fn try_state(n: BlockNumberFor) -> Result<(), sp_runtime::TryRuntimeError> { + Self::do_try_state(n) + } + } + + #[pallet::call] + impl Pallet { + /// Take the origin account as a stash and lock up `value` of its balance. `controller` will + /// be the account that controls it. + /// + /// `value` must be more than the `minimum_balance` specified by `T::Currency`. + /// + /// The dispatch origin for this call must be _Signed_ by the stash account. + /// + /// Emits `Bonded`. + /// + /// NOTE: Two of the storage writes (`Self::bonded`, `Self::payee`) are _never_ cleaned + /// unless the `origin` falls below _existential deposit_ (or equal to 0) and gets removed + /// as dust. + #[pallet::call_index(0)] + #[pallet::weight(T::WeightInfo::bond())] + pub fn bond( + origin: OriginFor, + #[pallet::compact] value: BalanceOf, + payee: RewardDestination, + ) -> DispatchResult { + let stash = ensure_signed(origin)?; + + ensure!(!T::Filter::contains(&stash), Error::::Restricted); + + if StakingLedger::::is_bonded(StakingAccount::Stash(stash.clone())) { + return Err(Error::::AlreadyBonded.into()); + } + + // An existing controller cannot become a stash. + if StakingLedger::::is_bonded(StakingAccount::Controller(stash.clone())) { + return Err(Error::::AlreadyPaired.into()); + } + + // Reject a bond which is considered to be _dust_. 
+ if value < asset::existential_deposit::() { + return Err(Error::::InsufficientBond.into()); + } + + let stash_balance = asset::free_to_stake::(&stash); + let value = value.min(stash_balance); + Self::deposit_event(Event::::Bonded { stash: stash.clone(), amount: value }); + let ledger = StakingLedger::::new(stash.clone(), value); + + // You're auto-bonded forever, here. We might improve this by only bonding when + // you actually validate/nominate and remove once you unbond __everything__. + ledger.bond(payee)?; + + Ok(()) + } + + /// Add some extra amount that have appeared in the stash `free_balance` into the balance up + /// for staking. + /// + /// The dispatch origin for this call must be _Signed_ by the stash, not the controller. + /// + /// Use this if there are additional funds in your stash account that you wish to bond. + /// Unlike [`bond`](Self::bond) or [`unbond`](Self::unbond) this function does not impose + /// any limitation on the amount that can be added. + /// + /// Emits `Bonded`. + #[pallet::call_index(1)] + #[pallet::weight(T::WeightInfo::bond_extra())] + pub fn bond_extra( + origin: OriginFor, + #[pallet::compact] max_additional: BalanceOf, + ) -> DispatchResult { + let stash = ensure_signed(origin)?; + ensure!(!T::Filter::contains(&stash), Error::::Restricted); + Self::do_bond_extra(&stash, max_additional) + } + + /// Schedule a portion of the stash to be unlocked ready for transfer out after the bond + /// period ends. If this leaves an amount actively bonded less than + /// [`asset::existential_deposit`], then it is increased to the full amount. + /// + /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. + /// + /// Once the unlock period is done, you can call `withdraw_unbonded` to actually move + /// the funds out of management ready for transfer. + /// + /// No more than a limited number of unlocking chunks (see `MaxUnlockingChunks`) + /// can co-exists at the same time. 
If there are no unlocking chunks slots available + /// [`Call::withdraw_unbonded`] is called to remove some of the chunks (if possible). + /// + /// If a user encounters the `InsufficientBond` error when calling this extrinsic, + /// they should call `chill` first in order to free up their bonded funds. + /// + /// Emits `Unbonded`. + /// + /// See also [`Call::withdraw_unbonded`]. + #[pallet::call_index(2)] + #[pallet::weight( + T::WeightInfo::withdraw_unbonded_kill(SPECULATIVE_NUM_SPANS).saturating_add(T::WeightInfo::unbond())) + ] + pub fn unbond( + origin: OriginFor, + #[pallet::compact] value: BalanceOf, + ) -> DispatchResultWithPostInfo { + let controller = ensure_signed(origin)?; + let unlocking = + Self::ledger(Controller(controller.clone())).map(|l| l.unlocking.len())?; + + // if there are no unlocking chunks available, try to withdraw chunks older than + // `BondingDuration` to proceed with the unbonding. + let maybe_withdraw_weight = { + if unlocking == T::MaxUnlockingChunks::get() as usize { + let real_num_slashing_spans = + SlashingSpans::::get(&controller).map_or(0, |s| s.iter().count()); + Some(Self::do_withdraw_unbonded(&controller, real_num_slashing_spans as u32)?) + } else { + None + } + }; + + // we need to fetch the ledger again because it may have been mutated in the call + // to `Self::do_withdraw_unbonded` above. + let mut ledger = Self::ledger(Controller(controller))?; + let mut value = value.min(ledger.active); + let stash = ledger.stash.clone(); + + ensure!( + ledger.unlocking.len() < T::MaxUnlockingChunks::get() as usize, + Error::::NoMoreChunks, + ); + + if !value.is_zero() { + ledger.active -= value; + + // Avoid there being a dust balance left in the staking system. 
+ if ledger.active < asset::existential_deposit::() { + value += ledger.active; + ledger.active = Zero::zero(); + } + + let min_active_bond = if Nominators::::contains_key(&stash) { + MinNominatorBond::::get() + } else if Validators::::contains_key(&stash) { + MinValidatorBond::::get() + } else { + Zero::zero() + }; + + // Make sure that the user maintains enough active bond for their role. + // If a user runs into this error, they should chill first. + ensure!(ledger.active >= min_active_bond, Error::::InsufficientBond); + + // Note: in case there is no current era it is fine to bond one era more. + let era = CurrentEra::::get() + .unwrap_or(0) + .defensive_saturating_add(T::BondingDuration::get()); + if let Some(chunk) = ledger.unlocking.last_mut().filter(|chunk| chunk.era == era) { + // To keep the chunk count down, we only keep one chunk per era. Since + // `unlocking` is a FiFo queue, if a chunk exists for `era` we know that it will + // be the last one. + chunk.value = chunk.value.defensive_saturating_add(value) + } else { + ledger + .unlocking + .try_push(UnlockChunk { value, era }) + .map_err(|_| Error::::NoMoreChunks)?; + }; + // NOTE: ledger must be updated prior to calling `Self::weight_of`. + ledger.update()?; + + // update this staker in the sorted list, if they exist in it. + if T::VoterList::contains(&stash) { + let _ = T::VoterList::on_update(&stash, Self::weight_of(&stash)); + } + + Self::deposit_event(Event::::Unbonded { stash, amount: value }); + } + + let actual_weight = if let Some(withdraw_weight) = maybe_withdraw_weight { + Some(T::WeightInfo::unbond().saturating_add(withdraw_weight)) + } else { + Some(T::WeightInfo::unbond()) + }; + + Ok(actual_weight.into()) + } + + /// Remove any unlocked chunks from the `unlocking` queue from our management. + /// + /// This essentially frees up that balance to be used by the stash account to do whatever + /// it wants. + /// + /// The dispatch origin for this call must be _Signed_ by the controller. 
+ /// + /// Emits `Withdrawn`. + /// + /// See also [`Call::unbond`]. + /// + /// ## Parameters + /// + /// - `num_slashing_spans` indicates the number of metadata slashing spans to clear when + /// this call results in a complete removal of all the data related to the stash account. + /// In this case, the `num_slashing_spans` must be larger or equal to the number of + /// slashing spans associated with the stash account in the [`SlashingSpans`] storage type, + /// otherwise the call will fail. The call weight is directly proportional to + /// `num_slashing_spans`. + #[pallet::call_index(3)] + #[pallet::weight(T::WeightInfo::withdraw_unbonded_kill(*num_slashing_spans))] + pub fn withdraw_unbonded( + origin: OriginFor, + num_slashing_spans: u32, + ) -> DispatchResultWithPostInfo { + let controller = ensure_signed(origin)?; + + let actual_weight = Self::do_withdraw_unbonded(&controller, num_slashing_spans)?; + Ok(Some(actual_weight).into()) + } + + /// Declare the desire to validate for the origin controller. + /// + /// Effects will be felt at the beginning of the next era. + /// + /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. + #[pallet::call_index(4)] + #[pallet::weight(T::WeightInfo::validate())] + pub fn validate(origin: OriginFor, prefs: ValidatorPrefs) -> DispatchResult { + let controller = ensure_signed(origin)?; + + let ledger = Self::ledger(Controller(controller))?; + + ensure!(ledger.active >= MinValidatorBond::::get(), Error::::InsufficientBond); + let stash = &ledger.stash; + + // ensure their commission is correct. + ensure!(prefs.commission >= MinCommission::::get(), Error::::CommissionTooLow); + + // Only check limits if they are not already a validator. + if !Validators::::contains_key(stash) { + // If this error is reached, we need to adjust the `MinValidatorBond` and start + // calling `chill_other`. Until then, we explicitly block new validators to protect + // the runtime. 
+ if let Some(max_validators) = MaxValidatorsCount::::get() { + ensure!( + Validators::::count() < max_validators, + Error::::TooManyValidators + ); + } + } + + Self::do_remove_nominator(stash); + Self::do_add_validator(stash, prefs.clone()); + Self::deposit_event(Event::::ValidatorPrefsSet { stash: ledger.stash, prefs }); + + Ok(()) + } + + /// Declare the desire to nominate `targets` for the origin controller. + /// + /// Effects will be felt at the beginning of the next era. + /// + /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. + #[pallet::call_index(5)] + #[pallet::weight(T::WeightInfo::nominate(targets.len() as u32))] + pub fn nominate( + origin: OriginFor, + targets: Vec>, + ) -> DispatchResult { + let controller = ensure_signed(origin)?; + + let ledger = Self::ledger(StakingAccount::Controller(controller.clone()))?; + + ensure!(ledger.active >= MinNominatorBond::::get(), Error::::InsufficientBond); + let stash = &ledger.stash; + + // Only check limits if they are not already a nominator. + if !Nominators::::contains_key(stash) { + // If this error is reached, we need to adjust the `MinNominatorBond` and start + // calling `chill_other`. Until then, we explicitly block new nominators to protect + // the runtime. 
+ if let Some(max_nominators) = MaxNominatorsCount::::get() { + ensure!( + Nominators::::count() < max_nominators, + Error::::TooManyNominators + ); + } + } + + // dedup targets + let mut targets = targets + .into_iter() + .map(|t| T::Lookup::lookup(t).map_err(DispatchError::from)) + .collect::, _>>()?; + targets.sort(); + targets.dedup(); + + ensure!(!targets.is_empty(), Error::::EmptyTargets); + ensure!( + targets.len() <= T::NominationsQuota::get_quota(ledger.active) as usize, + Error::::TooManyTargets + ); + + let old = Nominators::::get(stash).map_or_else(Vec::new, |x| x.targets.into_inner()); + + let targets: BoundedVec<_, _> = targets + .into_iter() + .map(|n| { + if old.contains(&n) || !Validators::::get(&n).blocked { + Ok(n) + } else { + Err(Error::::BadTarget.into()) + } + }) + .collect::, DispatchError>>()? + .try_into() + .map_err(|_| Error::::TooManyNominators)?; + + let nominations = Nominations { + targets, + // Initial nominations are considered submitted at era 0. See `Nominations` doc. + submitted_in: CurrentEra::::get().unwrap_or(0), + suppressed: false, + }; + + Self::do_remove_validator(stash); + Self::do_add_nominator(stash, nominations); + Ok(()) + } + + /// Declare no desire to either validate or nominate. + /// + /// Effects will be felt at the beginning of the next era. + /// + /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. + /// + /// ## Complexity + /// - Independent of the arguments. Insignificant complexity. + /// - Contains one read. + /// - Writes are limited to the `origin` account key. + #[pallet::call_index(6)] + #[pallet::weight(T::WeightInfo::chill())] + pub fn chill(origin: OriginFor) -> DispatchResult { + let controller = ensure_signed(origin)?; + + let ledger = Self::ledger(StakingAccount::Controller(controller))?; + + Self::chill_stash(&ledger.stash); + Ok(()) + } + + /// (Re-)set the payment target for a controller. 
+ /// + /// Effects will be felt instantly (as soon as this function is completed successfully). + /// + /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. + #[pallet::call_index(7)] + #[pallet::weight(T::WeightInfo::set_payee())] + pub fn set_payee( + origin: OriginFor, + payee: RewardDestination, + ) -> DispatchResult { + let controller = ensure_signed(origin)?; + let ledger = Self::ledger(Controller(controller.clone()))?; + + ensure!( + (payee != { + #[allow(deprecated)] + RewardDestination::Controller + }), + Error::::ControllerDeprecated + ); + + let _ = ledger + .set_payee(payee) + .defensive_proof("ledger was retrieved from storage, thus it's bonded; qed.")?; + + Ok(()) + } + + /// (Re-)sets the controller of a stash to the stash itself. This function previously + /// accepted a `controller` argument to set the controller to an account other than the + /// stash itself. This functionality has now been removed, now only setting the controller + /// to the stash, if it is not already. + /// + /// Effects will be felt instantly (as soon as this function is completed successfully). + /// + /// The dispatch origin for this call must be _Signed_ by the stash, not the controller. + #[pallet::call_index(8)] + #[pallet::weight(T::WeightInfo::set_controller())] + pub fn set_controller(origin: OriginFor) -> DispatchResult { + let stash = ensure_signed(origin)?; + + Self::ledger(StakingAccount::Stash(stash.clone())).map(|ledger| { + let controller = ledger.controller() + .defensive_proof("Ledger's controller field didn't exist. The controller should have been fetched using StakingLedger.") + .ok_or(Error::::NotController)?; + + if controller == stash { + // Stash is already its own controller. + return Err(Error::::AlreadyPaired.into()) + } + + let _ = ledger.set_controller_to_stash()?; + Ok(()) + })? + } + + /// Sets the ideal number of validators. + /// + /// The dispatch origin must be Root. 
+ #[pallet::call_index(9)] + #[pallet::weight(T::WeightInfo::set_validator_count())] + pub fn set_validator_count( + origin: OriginFor, + #[pallet::compact] new: u32, + ) -> DispatchResult { + ensure_root(origin)?; + + ensure!(new <= T::MaxValidatorSet::get(), Error::::TooManyValidators); + + ValidatorCount::::put(new); + Ok(()) + } + + /// Increments the ideal number of validators up to maximum of + /// `T::MaxValidatorSet`. + /// + /// The dispatch origin must be Root. + #[pallet::call_index(10)] + #[pallet::weight(T::WeightInfo::set_validator_count())] + pub fn increase_validator_count( + origin: OriginFor, + #[pallet::compact] additional: u32, + ) -> DispatchResult { + ensure_root(origin)?; + let old = ValidatorCount::::get(); + let new = old.checked_add(additional).ok_or(ArithmeticError::Overflow)?; + + ensure!(new <= T::MaxValidatorSet::get(), Error::::TooManyValidators); + + ValidatorCount::::put(new); + Ok(()) + } + + /// Scale up the ideal number of validators by a factor up to maximum of + /// `T::MaxValidatorSet`. + /// + /// The dispatch origin must be Root. + #[pallet::call_index(11)] + #[pallet::weight(T::WeightInfo::set_validator_count())] + pub fn scale_validator_count(origin: OriginFor, factor: Percent) -> DispatchResult { + ensure_root(origin)?; + let old = ValidatorCount::::get(); + let new = old.checked_add(factor.mul_floor(old)).ok_or(ArithmeticError::Overflow)?; + + ensure!(new <= T::MaxValidatorSet::get(), Error::::TooManyValidators); + + ValidatorCount::::put(new); + Ok(()) + } + + /// Force there to be no new eras indefinitely. + /// + /// The dispatch origin must be Root. + /// + /// # Warning + /// + /// The election process starts multiple blocks before the end of the era. + /// Thus the election process may be ongoing when this is called. In this case the + /// election will continue until the next era is triggered. 
+ #[pallet::call_index(12)] + #[pallet::weight(T::WeightInfo::force_no_eras())] + pub fn force_no_eras(origin: OriginFor) -> DispatchResult { + ensure_root(origin)?; + Self::set_force_era(Forcing::ForceNone); + Ok(()) + } + + /// Force there to be a new era at the end of the next session. After this, it will be + /// reset to normal (non-forced) behaviour. + /// + /// The dispatch origin must be Root. + /// + /// # Warning + /// + /// The election process starts multiple blocks before the end of the era. + /// If this is called just before a new era is triggered, the election process may not + /// have enough blocks to get a result. + #[pallet::call_index(13)] + #[pallet::weight(T::WeightInfo::force_new_era())] + pub fn force_new_era(origin: OriginFor) -> DispatchResult { + ensure_root(origin)?; + Self::set_force_era(Forcing::ForceNew); + Ok(()) + } + + /// Set the validators who cannot be slashed (if any). + /// + /// The dispatch origin must be Root. + #[pallet::call_index(14)] + #[pallet::weight(T::WeightInfo::set_invulnerables(invulnerables.len() as u32))] + pub fn set_invulnerables( + origin: OriginFor, + invulnerables: Vec, + ) -> DispatchResult { + ensure_root(origin)?; + let invulnerables = + BoundedVec::try_from(invulnerables).map_err(|_| Error::::BoundNotMet)?; + >::put(invulnerables); + Ok(()) + } + + /// Force a current staker to become completely unstaked, immediately. + /// + /// The dispatch origin must be Root. + /// + /// ## Parameters + /// + /// - `num_slashing_spans`: Refer to comments on [`Call::withdraw_unbonded`] for more + /// details. + #[pallet::call_index(15)] + #[pallet::weight(T::WeightInfo::force_unstake(*num_slashing_spans))] + pub fn force_unstake( + origin: OriginFor, + stash: T::AccountId, + num_slashing_spans: u32, + ) -> DispatchResult { + ensure_root(origin)?; + + // Remove all staking-related information and lock. 
+ Self::kill_stash(&stash, num_slashing_spans)?; + + Ok(()) + } + + /// Force there to be a new era at the end of sessions indefinitely. + /// + /// The dispatch origin must be Root. + /// + /// # Warning + /// + /// The election process starts multiple blocks before the end of the era. + /// If this is called just before a new era is triggered, the election process may not + /// have enough blocks to get a result. + #[pallet::call_index(16)] + #[pallet::weight(T::WeightInfo::force_new_era_always())] + pub fn force_new_era_always(origin: OriginFor) -> DispatchResult { + ensure_root(origin)?; + Self::set_force_era(Forcing::ForceAlways); + Ok(()) + } + + /// Cancels scheduled slashes for a given era before they are applied. + /// + /// This function allows `T::AdminOrigin` to selectively remove pending slashes from + /// the `UnappliedSlashes` storage, preventing their enactment. + /// + /// ## Parameters + /// - `era`: The staking era for which slashes were deferred. + /// - `slash_keys`: A list of slash keys identifying the slashes to remove. This is a tuple + /// of `(stash, slash_fraction, page_index)`. + #[pallet::call_index(17)] + #[pallet::weight(T::WeightInfo::cancel_deferred_slash(slash_keys.len() as u32))] + pub fn cancel_deferred_slash( + origin: OriginFor, + era: EraIndex, + slash_keys: Vec<(T::AccountId, Perbill, u32)>, + ) -> DispatchResult { + T::AdminOrigin::ensure_origin(origin)?; + ensure!(!slash_keys.is_empty(), Error::::EmptyTargets); + + // Remove the unapplied slashes. + slash_keys.into_iter().for_each(|i| { + UnappliedSlashes::::take(&era, &i).map(|unapplied_slash| { + Self::deposit_event(Event::::SlashCancelled { + slash_era: era, + slash_key: i, + payout: unapplied_slash.payout, + }); + }); + }); + Ok(()) + } + + /// Pay out next page of the stakers behind a validator for the given era. + /// + /// - `validator_stash` is the stash account of the validator. + /// - `era` may be any era between `[current_era - history_depth; current_era]`. 
+ /// + /// The origin of this call must be _Signed_. Any account can call this function, even if + /// it is not one of the stakers. + /// + /// The reward payout could be paged in case there are too many nominators backing the + /// `validator_stash`. This call will payout unpaid pages in an ascending order. To claim a + /// specific page, use `payout_stakers_by_page`. + /// + /// If all pages are claimed, it returns an error `InvalidPage`. + #[pallet::call_index(18)] + #[pallet::weight(T::WeightInfo::payout_stakers_alive_staked(T::MaxExposurePageSize::get()))] + pub fn payout_stakers( + origin: OriginFor, + validator_stash: T::AccountId, + era: EraIndex, + ) -> DispatchResultWithPostInfo { + ensure_signed(origin)?; + + Self::do_payout_stakers(validator_stash, era) + } + + /// Rebond a portion of the stash scheduled to be unlocked. + /// + /// The dispatch origin must be signed by the controller. + #[pallet::call_index(19)] + #[pallet::weight(T::WeightInfo::rebond(T::MaxUnlockingChunks::get() as u32))] + pub fn rebond( + origin: OriginFor, + #[pallet::compact] value: BalanceOf, + ) -> DispatchResultWithPostInfo { + let controller = ensure_signed(origin)?; + let ledger = Self::ledger(Controller(controller))?; + + ensure!(!T::Filter::contains(&ledger.stash), Error::::Restricted); + ensure!(!ledger.unlocking.is_empty(), Error::::NoUnlockChunk); + + let initial_unlocking = ledger.unlocking.len() as u32; + let (ledger, rebonded_value) = ledger.rebond(value); + // Last check: the new active amount of ledger must be more than ED. + ensure!( + ledger.active >= asset::existential_deposit::(), + Error::::InsufficientBond + ); + + Self::deposit_event(Event::::Bonded { + stash: ledger.stash.clone(), + amount: rebonded_value, + }); + + let stash = ledger.stash.clone(); + let final_unlocking = ledger.unlocking.len(); + + // NOTE: ledger must be updated prior to calling `Self::weight_of`. 
+ ledger.update()?; + if T::VoterList::contains(&stash) { + let _ = T::VoterList::on_update(&stash, Self::weight_of(&stash)); + } + + let removed_chunks = 1u32 // for the case where the last iterated chunk is not removed + .saturating_add(initial_unlocking) + .saturating_sub(final_unlocking as u32); + Ok(Some(T::WeightInfo::rebond(removed_chunks)).into()) + } + + /// Remove all data structures concerning a staker/stash once it is at a state where it can + /// be considered `dust` in the staking system. The requirements are: + /// + /// 1. the `total_balance` of the stash is below existential deposit. + /// 2. or, the `ledger.total` of the stash is below existential deposit. + /// 3. or, existential deposit is zero and either `total_balance` or `ledger.total` is zero. + /// + /// The former can happen in cases like a slash; the latter when a fully unbonded account + /// is still receiving staking rewards in `RewardDestination::Staked`. + /// + /// It can be called by anyone, as long as `stash` meets the above requirements. + /// + /// Refunds the transaction fees upon successful execution. + /// + /// ## Parameters + /// + /// - `num_slashing_spans`: Refer to comments on [`Call::withdraw_unbonded`] for more + /// details. + #[pallet::call_index(20)] + #[pallet::weight(T::WeightInfo::reap_stash(*num_slashing_spans))] + pub fn reap_stash( + origin: OriginFor, + stash: T::AccountId, + num_slashing_spans: u32, + ) -> DispatchResultWithPostInfo { + let _ = ensure_signed(origin)?; + + // virtual stakers should not be allowed to be reaped. 
+ ensure!(!Self::is_virtual_staker(&stash), Error::::VirtualStakerNotAllowed); + + let ed = asset::existential_deposit::(); + let origin_balance = asset::total_balance::(&stash); + let ledger_total = + Self::ledger(Stash(stash.clone())).map(|l| l.total).unwrap_or_default(); + let reapable = origin_balance < ed || + origin_balance.is_zero() || + ledger_total < ed || + ledger_total.is_zero(); + ensure!(reapable, Error::::FundedTarget); + + // Remove all staking-related information and lock. + Self::kill_stash(&stash, num_slashing_spans)?; + + Ok(Pays::No.into()) + } + + /// Remove the given nominations from the calling validator. + /// + /// Effects will be felt at the beginning of the next era. + /// + /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. + /// + /// - `who`: A list of nominator stash accounts who are nominating this validator which + /// should no longer be nominating this validator. + /// + /// Note: Making this call only makes sense if you first set the validator preferences to + /// block any further nominations. + #[pallet::call_index(21)] + #[pallet::weight(T::WeightInfo::kick(who.len() as u32))] + pub fn kick(origin: OriginFor, who: Vec>) -> DispatchResult { + let controller = ensure_signed(origin)?; + let ledger = Self::ledger(Controller(controller))?; + let stash = &ledger.stash; + + for nom_stash in who + .into_iter() + .map(T::Lookup::lookup) + .collect::, _>>()? + .into_iter() + { + Nominators::::mutate(&nom_stash, |maybe_nom| { + if let Some(ref mut nom) = maybe_nom { + if let Some(pos) = nom.targets.iter().position(|v| v == stash) { + nom.targets.swap_remove(pos); + Self::deposit_event(Event::::Kicked { + nominator: nom_stash.clone(), + stash: stash.clone(), + }); + } + } + }); + } + + Ok(()) + } + + /// Update the various staking configurations . + /// + /// * `min_nominator_bond`: The minimum active bond needed to be a nominator. 
+ /// * `min_validator_bond`: The minimum active bond needed to be a validator. + /// * `max_nominator_count`: The max number of users who can be a nominator at once. When + /// set to `None`, no limit is enforced. + /// * `max_validator_count`: The max number of users who can be a validator at once. When + /// set to `None`, no limit is enforced. + /// * `chill_threshold`: The ratio of `max_nominator_count` or `max_validator_count` which + /// should be filled in order for the `chill_other` transaction to work. + /// * `min_commission`: The minimum amount of commission that each validator must maintain. + /// This is checked only upon calling `validate`. Existing validators are not affected. + /// + /// RuntimeOrigin must be Root to call this function. + /// + /// NOTE: Existing nominators and validators will not be affected by this update. + /// To kick people under the new limits, `chill_other` should be called. + // We assume the worst case for this call is either: all items are set or all items are + // removed. + #[pallet::call_index(22)] + #[pallet::weight( + T::WeightInfo::set_staking_configs_all_set() + .max(T::WeightInfo::set_staking_configs_all_remove()) + )] + pub fn set_staking_configs( + origin: OriginFor, + min_nominator_bond: ConfigOp>, + min_validator_bond: ConfigOp>, + max_nominator_count: ConfigOp, + max_validator_count: ConfigOp, + chill_threshold: ConfigOp, + min_commission: ConfigOp, + max_staked_rewards: ConfigOp, + ) -> DispatchResult { + ensure_root(origin)?; + + macro_rules! 
config_op_exp { + ($storage:ty, $op:ident) => { + match $op { + ConfigOp::Noop => (), + ConfigOp::Set(v) => <$storage>::put(v), + ConfigOp::Remove => <$storage>::kill(), + } + }; + } + + config_op_exp!(MinNominatorBond, min_nominator_bond); + config_op_exp!(MinValidatorBond, min_validator_bond); + config_op_exp!(MaxNominatorsCount, max_nominator_count); + config_op_exp!(MaxValidatorsCount, max_validator_count); + config_op_exp!(ChillThreshold, chill_threshold); + config_op_exp!(MinCommission, min_commission); + config_op_exp!(MaxStakedRewards, max_staked_rewards); + Ok(()) + } + /// Declare a `controller` to stop participating as either a validator or nominator. + /// + /// Effects will be felt at the beginning of the next era. + /// + /// The dispatch origin for this call must be _Signed_, but can be called by anyone. + /// + /// If the caller is the same as the controller being targeted, then no further checks are + /// enforced, and this function behaves just like `chill`. + /// + /// If the caller is different than the controller being targeted, the following conditions + /// must be met: + /// + /// * `controller` must belong to a nominator who has become non-decodable, + /// + /// Or: + /// + /// * A `ChillThreshold` must be set and checked which defines how close to the max + /// nominators or validators we must reach before users can start chilling one-another. + /// * A `MaxNominatorCount` and `MaxValidatorCount` must be set which is used to determine + /// how close we are to the threshold. + /// * A `MinNominatorBond` and `MinValidatorBond` must be set and checked, which determines + /// if this is a person that should be chilled because they have not met the threshold + /// bond required. + /// + /// This can be helpful if bond requirements are updated, and we need to remove old users + /// who do not satisfy these requirements. 
+ #[pallet::call_index(23)] + #[pallet::weight(T::WeightInfo::chill_other())] + pub fn chill_other(origin: OriginFor, stash: T::AccountId) -> DispatchResult { + // Anyone can call this function. + let caller = ensure_signed(origin)?; + let ledger = Self::ledger(Stash(stash.clone()))?; + let controller = ledger + .controller() + .defensive_proof( + "Ledger's controller field didn't exist. The controller should have been fetched using StakingLedger.", + ) + .ok_or(Error::::NotController)?; + + // In order for one user to chill another user, the following conditions must be met: + // + // * `controller` belongs to a nominator who has become non-decodable, + // + // Or + // + // * A `ChillThreshold` is set which defines how close to the max nominators or + // validators we must reach before users can start chilling one-another. + // * A `MaxNominatorCount` and `MaxValidatorCount` which is used to determine how close + // we are to the threshold. + // * A `MinNominatorBond` and `MinValidatorBond` which is the final condition checked to + // determine this is a person that should be chilled because they have not met the + // threshold bond required. + // + // Otherwise, if caller is the same as the controller, this is just like `chill`. 
+ + if Nominators::::contains_key(&stash) && Nominators::::get(&stash).is_none() { + Self::chill_stash(&stash); + return Ok(()); + } + + if caller != controller { + let threshold = ChillThreshold::::get().ok_or(Error::::CannotChillOther)?; + let min_active_bond = if Nominators::::contains_key(&stash) { + let max_nominator_count = + MaxNominatorsCount::::get().ok_or(Error::::CannotChillOther)?; + let current_nominator_count = Nominators::::count(); + ensure!( + threshold * max_nominator_count < current_nominator_count, + Error::::CannotChillOther + ); + MinNominatorBond::::get() + } else if Validators::::contains_key(&stash) { + let max_validator_count = + MaxValidatorsCount::::get().ok_or(Error::::CannotChillOther)?; + let current_validator_count = Validators::::count(); + ensure!( + threshold * max_validator_count < current_validator_count, + Error::::CannotChillOther + ); + MinValidatorBond::::get() + } else { + Zero::zero() + }; + + ensure!(ledger.active < min_active_bond, Error::::CannotChillOther); + } + + Self::chill_stash(&stash); + Ok(()) + } + + /// Force a validator to have at least the minimum commission. This will not affect a + /// validator who already has a commission greater than or equal to the minimum. Any account + /// can call this. + #[pallet::call_index(24)] + #[pallet::weight(T::WeightInfo::force_apply_min_commission())] + pub fn force_apply_min_commission( + origin: OriginFor, + validator_stash: T::AccountId, + ) -> DispatchResult { + ensure_signed(origin)?; + let min_commission = MinCommission::::get(); + Validators::::try_mutate_exists(validator_stash, |maybe_prefs| { + maybe_prefs + .as_mut() + .map(|prefs| { + (prefs.commission < min_commission) + .then(|| prefs.commission = min_commission) + }) + .ok_or(Error::::NotStash) + })?; + Ok(()) + } + + /// Sets the minimum amount of commission that each validators must maintain. 
+ /// + /// This call has lower privilege requirements than `set_staking_config` and can be called + /// by the `T::AdminOrigin`. Root can always call this. + #[pallet::call_index(25)] + #[pallet::weight(T::WeightInfo::set_min_commission())] + pub fn set_min_commission(origin: OriginFor, new: Perbill) -> DispatchResult { + T::AdminOrigin::ensure_origin(origin)?; + MinCommission::::put(new); + Ok(()) + } + + /// Pay out a page of the stakers behind a validator for the given era and page. + /// + /// - `validator_stash` is the stash account of the validator. + /// - `era` may be any era between `[current_era - history_depth; current_era]`. + /// - `page` is the page index of nominators to pay out with value between 0 and + /// `num_nominators / T::MaxExposurePageSize`. + /// + /// The origin of this call must be _Signed_. Any account can call this function, even if + /// it is not one of the stakers. + /// + /// If a validator has more than [`Config::MaxExposurePageSize`] nominators backing + /// them, then the list of nominators is paged, with each page being capped at + /// [`Config::MaxExposurePageSize`.] If a validator has more than one page of nominators, + /// the call needs to be made for each page separately in order for all the nominators + /// backing a validator to receive the reward. The nominators are not sorted across pages + /// and so it should not be assumed the highest staker would be on the topmost page and vice + /// versa. If rewards are not claimed in [`Config::HistoryDepth`] eras, they are lost. 
+ #[pallet::call_index(26)] + #[pallet::weight(T::WeightInfo::payout_stakers_alive_staked(T::MaxExposurePageSize::get()))] + pub fn payout_stakers_by_page( + origin: OriginFor, + validator_stash: T::AccountId, + era: EraIndex, + page: Page, + ) -> DispatchResultWithPostInfo { + ensure_signed(origin)?; + Self::do_payout_stakers_by_page(validator_stash, era, page) + } + + /// Migrates an account's `RewardDestination::Controller` to + /// `RewardDestination::Account(controller)`. + /// + /// Effects will be felt instantly (as soon as this function is completed successfully). + /// + /// This will waive the transaction fee if the `payee` is successfully migrated. + #[pallet::call_index(27)] + #[pallet::weight(T::WeightInfo::update_payee())] + pub fn update_payee( + origin: OriginFor, + controller: T::AccountId, + ) -> DispatchResultWithPostInfo { + let _ = ensure_signed(origin)?; + let ledger = Self::ledger(StakingAccount::Controller(controller.clone()))?; + + ensure!( + (Payee::::get(&ledger.stash) == { + #[allow(deprecated)] + Some(RewardDestination::Controller) + }), + Error::::NotController + ); + + let _ = ledger + .set_payee(RewardDestination::Account(controller)) + .defensive_proof("ledger should have been previously retrieved from storage.")?; + + Ok(Pays::No.into()) + } + + /// Updates a batch of controller accounts to their corresponding stash account if they are + /// not the same. Ignores any controller accounts that do not exist, and does not operate if + /// the stash and controller are already the same. + /// + /// Effects will be felt instantly (as soon as this function is completed successfully). + /// + /// The dispatch origin must be `T::AdminOrigin`. 
+ #[pallet::call_index(28)] + #[pallet::weight(T::WeightInfo::deprecate_controller_batch(controllers.len() as u32))] + pub fn deprecate_controller_batch( + origin: OriginFor, + controllers: BoundedVec, + ) -> DispatchResultWithPostInfo { + T::AdminOrigin::ensure_origin(origin)?; + + // Ignore controllers that do not exist or are already the same as stash. + let filtered_batch_with_ledger: Vec<_> = controllers + .iter() + .filter_map(|controller| { + let ledger = Self::ledger(StakingAccount::Controller(controller.clone())); + ledger.ok().map_or(None, |ledger| { + // If the controller `RewardDestination` is still the deprecated + // `Controller` variant, skip deprecating this account. + let payee_deprecated = Payee::::get(&ledger.stash) == { + #[allow(deprecated)] + Some(RewardDestination::Controller) + }; + + if ledger.stash != *controller && !payee_deprecated { + Some(ledger) + } else { + None + } + }) + }) + .collect(); + + // Update unique pairs. + let mut failures = 0; + for ledger in filtered_batch_with_ledger { + let _ = ledger.clone().set_controller_to_stash().map_err(|_| failures += 1); + } + Self::deposit_event(Event::::ControllerBatchDeprecated { failures }); + + Ok(Some(T::WeightInfo::deprecate_controller_batch(controllers.len() as u32)).into()) + } + + /// Restores the state of a ledger which is in an inconsistent state. + /// + /// The requirements to restore a ledger are the following: + /// * The stash is bonded; or + /// * The stash is not bonded but it has a staking lock left behind; or + /// * If the stash has an associated ledger and its state is inconsistent; or + /// * If the ledger is not corrupted *but* its staking lock is out of sync. + /// + /// The `maybe_*` input parameters will overwrite the corresponding data and metadata of the + /// ledger associated with the stash. If the input parameters are not set, the ledger will + /// be reset values from on-chain state. 
+ #[pallet::call_index(29)] + #[pallet::weight(T::WeightInfo::restore_ledger())] + pub fn restore_ledger( + origin: OriginFor, + stash: T::AccountId, + maybe_controller: Option, + maybe_total: Option>, + maybe_unlocking: Option>, T::MaxUnlockingChunks>>, + ) -> DispatchResult { + T::AdminOrigin::ensure_origin(origin)?; + + // cannot restore ledger for virtual stakers. + ensure!(!Self::is_virtual_staker(&stash), Error::::VirtualStakerNotAllowed); + + let current_lock = asset::staked::(&stash); + let stash_balance = asset::stakeable_balance::(&stash); + + let (new_controller, new_total) = match Self::inspect_bond_state(&stash) { + Ok(LedgerIntegrityState::Corrupted) => { + let new_controller = maybe_controller.unwrap_or(stash.clone()); + + let new_total = if let Some(total) = maybe_total { + let new_total = total.min(stash_balance); + // enforce hold == ledger.amount. + asset::update_stake::(&stash, new_total)?; + new_total + } else { + current_lock + }; + + Ok((new_controller, new_total)) + }, + Ok(LedgerIntegrityState::CorruptedKilled) => { + if current_lock == Zero::zero() { + // this case needs to restore both lock and ledger, so the new total needs + // to be given by the called since there's no way to restore the total + // on-chain. + ensure!(maybe_total.is_some(), Error::::CannotRestoreLedger); + Ok(( + stash.clone(), + maybe_total.expect("total exists as per the check above; qed."), + )) + } else { + Ok((stash.clone(), current_lock)) + } + }, + Ok(LedgerIntegrityState::LockCorrupted) => { + // ledger is not corrupted but its locks are out of sync. In this case, we need + // to enforce a new ledger.total and staking lock for this stash. + let new_total = + maybe_total.ok_or(Error::::CannotRestoreLedger)?.min(stash_balance); + asset::update_stake::(&stash, new_total)?; + + Ok((stash.clone(), new_total)) + }, + Err(Error::::BadState) => { + // the stash and ledger do not exist but lock is lingering. 
+ asset::kill_stake::(&stash)?; + ensure!( + Self::inspect_bond_state(&stash) == Err(Error::::NotStash), + Error::::BadState + ); + + return Ok(()); + }, + Ok(LedgerIntegrityState::Ok) | Err(_) => Err(Error::::CannotRestoreLedger), + }?; + + // re-bond stash and controller tuple. + Bonded::::insert(&stash, &new_controller); + + // resoter ledger state. + let mut ledger = StakingLedger::::new(stash.clone(), new_total); + ledger.controller = Some(new_controller); + ledger.unlocking = maybe_unlocking.unwrap_or_default(); + ledger.update()?; + + ensure!( + Self::inspect_bond_state(&stash) == Ok(LedgerIntegrityState::Ok), + Error::::BadState + ); + Ok(()) + } + + /// Migrates permissionlessly a stash from locks to holds. + /// + /// This removes the old lock on the stake and creates a hold on it atomically. If all + /// stake cannot be held, the best effort is made to hold as much as possible. The remaining + /// stake is removed from the ledger. + /// + /// The fee is waived if the migration is successful. + #[pallet::call_index(30)] + #[pallet::weight(T::WeightInfo::migrate_currency())] + pub fn migrate_currency( + origin: OriginFor, + stash: T::AccountId, + ) -> DispatchResultWithPostInfo { + let _ = ensure_signed(origin)?; + Self::do_migrate_currency(&stash)?; + + // Refund the transaction fee if successful. + Ok(Pays::No.into()) + } + + /// Manually applies a deferred slash for a given era. + /// + /// Normally, slashes are automatically applied shortly after the start of the `slash_era`. + /// This function exists as a **fallback mechanism** in case slashes were not applied due to + /// unexpected reasons. It allows anyone to manually apply an unapplied slash. + /// + /// ## Parameters + /// - `slash_era`: The staking era in which the slash was originally scheduled. + /// - `slash_key`: A unique identifier for the slash, represented as a tuple: + /// - `stash`: The stash account of the validator being slashed. 
+ /// - `slash_fraction`: The fraction of the stake that was slashed. + /// - `page_index`: The index of the exposure page being processed. + /// + /// ## Behavior + /// - The function is **permissionless**—anyone can call it. + /// - The `slash_era` **must be the current era or a past era**. If it is in the future, the + /// call fails with `EraNotStarted`. + /// - The fee is waived if the slash is successfully applied. + /// + /// ## Future Improvement + /// - Implement an **off-chain worker (OCW) task** to automatically apply slashes when there + /// is unused block space, improving efficiency. + #[pallet::call_index(31)] + #[pallet::weight(T::WeightInfo::apply_slash())] + pub fn apply_slash( + origin: OriginFor, + slash_era: EraIndex, + slash_key: (T::AccountId, Perbill, u32), + ) -> DispatchResultWithPostInfo { + let _ = ensure_signed(origin)?; + let active_era = ActiveEra::::get().map(|a| a.index).unwrap_or_default(); + ensure!(slash_era <= active_era, Error::::EraNotStarted); + let unapplied_slash = UnappliedSlashes::::take(&slash_era, &slash_key) + .ok_or(Error::::InvalidSlashRecord)?; + slashing::apply_slash::(unapplied_slash, slash_era); + + Ok(Pays::No.into()) + } + + /// Adjusts the staking ledger by withdrawing any excess staked amount. + /// + /// This function corrects cases where a user's recorded stake in the ledger + /// exceeds their actual staked funds. This situation can arise due to cases such as + /// external slashing by another pallet, leading to an inconsistency between the ledger + /// and the actual stake. 
+ #[pallet::call_index(32)] + #[pallet::weight(T::DbWeight::get().reads_writes(2, 1))] + pub fn withdraw_overstake(origin: OriginFor, stash: T::AccountId) -> DispatchResult { + use sp_runtime::Saturating; + let _ = ensure_signed(origin)?; + + let ledger = Self::ledger(Stash(stash.clone()))?; + let actual_stake = asset::staked::(&stash); + let force_withdraw_amount = ledger.total.defensive_saturating_sub(actual_stake); + + // ensure there is something to force unstake. + ensure!(!force_withdraw_amount.is_zero(), Error::::BoundNotMet); + + // we ignore if active is 0. It implies the locked amount is not actively staked. The + // account can still get away from potential slash, but we can't do much better here. + StakingLedger { + total: actual_stake, + active: ledger.active.saturating_sub(force_withdraw_amount), + ..ledger + } + .update()?; + + Self::deposit_event(Event::::Withdrawn { stash, amount: force_withdraw_amount }); + + Ok(()) + } + } +} diff --git a/substrate/frame/staking-async/src/session_rotation.rs b/substrate/frame/staking-async/src/session_rotation.rs new file mode 100644 index 0000000000000..8fe278bf1ee9a --- /dev/null +++ b/substrate/frame/staking-async/src/session_rotation.rs @@ -0,0 +1,1026 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Manages all era rotation logic based on session increments. +//! +//! 
# Lifecycle: +//! +//! When a session ends in RC, a session report is sent to AH with the ending session index. Given +//! there are 6 sessions per Era, and we configure the PlanningEraOffset to be 1, the following +//! happens. +//! +//! ## Idle Sessions +//! In the happy path, first 3 sessions are idle. Nothing much happens in these sessions. +//! +//! +//! ## Planning New Era Session +//! In the happy path, `planning new era` session is initiated when 3rd session ends and the 4th +//! starts in the active era. +//! +//! **Triggers** +//! 1. `SessionProgress == SessionsPerEra - PlanningEraOffset` +//! 2. Forcing is set to `ForceNew` or `ForceAlways` +//! +//! **Actions** +//! 1. Triggers the election process, +//! 2. Updates the CurrentEra. +//! +//! **SkipIf** +//! CurrentEra = ActiveEra + 1 // this implies planning session has already been triggered. +//! +//! **FollowUp** +//! When the election process is over, we send the new validator set, with the CurrentEra index +//! as the id of the validator set. +//! +//! +//! ## Era Rotation Session +//! In the happy path, this happens when the 5th session ends and the 6th starts in the active era. +//! +//! **Triggers** +//! When we receive an activation timestamp from RC. +//! +//! **Assertions** +//! 1. CurrentEra must be ActiveEra + 1. +//! 2. Id of the activation timestamp same as CurrentEra. +//! +//! **Actions** +//! - Finalize the currently active era. +//! - Increment ActiveEra by 1. +//! - Cleanup the old era information. +//! - Set ErasStartSessionIndex with the activating era index and starting session index. +//! +//! **Exceptional Scenarios** +//! - Delay in exporting validator set: Triggered in a session later than 7th. +//! - Forcing Era: May triggered in a session earlier than 7th. +//! +//! ## Example Flow of a happy path +//! +//! * end 0, start 1, plan 2 +//! * end 1, start 2, plan 3 +//! * end 2, start 3, plan 4 +//! * end 3, start 4, plan 5 // `Plan new era` session. Current Era++. 
Trigger Election. +//! * **** Somewhere here: Election set is sent to RC, keyed with Current Era +//! * end 4, start 5, plan 6 // RC::session receives and queues this set. +//! * end 5, start 6, plan 7 // Session report contains activation timestamp with Current Era. + +use crate::*; +use alloc::vec::Vec; +use frame_election_provider_support::{BoundedSupportsOf, ElectionProvider, PageIndex}; +use frame_support::{ + pallet_prelude::*, + traits::{Defensive, DefensiveMax, DefensiveSaturating, OnUnbalanced, TryCollect}, +}; +use sp_runtime::{Perbill, Percent, Saturating}; +use sp_staking::{ + currency_to_vote::CurrencyToVote, Exposure, Page, PagedExposureMetadata, SessionIndex, +}; + +/// A handler for all era-based storage items. +/// +/// All of the following storage items must be controlled by this type: +/// +/// [`ErasValidatorPrefs`] +/// [`ErasClaimedRewards`] +/// [`ErasStakersPaged`] +/// [`ErasStakersOverview`] +/// [`ErasValidatorReward`] +/// [`ErasRewardPoints`] +/// [`ErasTotalStake`] +/// [`ErasStartSessionIndex`] +pub struct Eras(core::marker::PhantomData); + +impl Eras { + /// Prune all associated information with the given era. + /// + /// Implementation note: ATM this is deleting all the information in one go, yet it can very + /// well be done lazily. 
+ pub(crate) fn prune_era(era: EraIndex) { + crate::log!(debug, "Pruning era {:?}", era); + let mut cursor = >::clear_prefix(era, u32::MAX, None); + debug_assert!(cursor.maybe_cursor.is_none()); + cursor = >::clear_prefix(era, u32::MAX, None); + debug_assert!(cursor.maybe_cursor.is_none()); + cursor = >::clear_prefix((era,), u32::MAX, None); + debug_assert!(cursor.maybe_cursor.is_none()); + cursor = >::clear_prefix(era, u32::MAX, None); + debug_assert!(cursor.maybe_cursor.is_none()); + + >::remove(era); + >::remove(era); + >::remove(era); + ErasStartSessionIndex::::remove(era); + } + + pub(crate) fn set_validator_prefs(era: EraIndex, stash: &T::AccountId, prefs: ValidatorPrefs) { + debug_assert_eq!(era, Rotator::::planning_era(), "we only set prefs for planning era"); + >::insert(era, stash, prefs); + } + + pub(crate) fn get_validator_prefs(era: EraIndex, stash: &T::AccountId) -> ValidatorPrefs { + >::get(era, stash) + } + + /// Returns validator commission for this era and page. + pub(crate) fn get_validator_commission(era: EraIndex, stash: &T::AccountId) -> Perbill { + Self::get_validator_prefs(era, stash).commission + } + + /// Returns true if validator has one or more page of era rewards not claimed yet. + pub(crate) fn pending_rewards(era: EraIndex, validator: &T::AccountId) -> bool { + >::get(&era, validator) + .map(|overview| { + ErasClaimedRewards::::get(era, validator).len() < overview.page_count as usize + }) + .unwrap_or(false) + } + + /// Get exposure for a validator at a given era and page. + /// + /// This builds a paged exposure from `PagedExposureMetadata` and `ExposurePage` of the + /// validator. For older non-paged exposure, it returns the clipped exposure directly. 
+ pub(crate) fn get_paged_exposure( + era: EraIndex, + validator: &T::AccountId, + page: Page, + ) -> Option>> { + let overview = >::get(&era, validator)?; + + // validator stake is added only in page zero + let validator_stake = if page == 0 { overview.own } else { Zero::zero() }; + + // since overview is present, paged exposure will always be present except when a + // validator has only own stake and no nominator stake. + let exposure_page = >::get((era, validator, page)).unwrap_or_default(); + + // build the exposure + Some(PagedExposure { + exposure_metadata: PagedExposureMetadata { own: validator_stake, ..overview }, + exposure_page, + }) + } + + /// Get full exposure of the validator at a given era. + pub(crate) fn get_full_exposure( + era: EraIndex, + validator: &T::AccountId, + ) -> Exposure> { + let Some(overview) = >::get(&era, validator) else { + return Exposure::default(); + }; + + let mut others = Vec::with_capacity(overview.nominator_count as usize); + for page in 0..overview.page_count { + let nominators = >::get((era, validator, page)); + others.append(&mut nominators.map(|n| n.others).defensive_unwrap_or_default()); + } + + Exposure { total: overview.total, own: overview.own, others } + } + + /// Returns the number of pages of exposure a validator has for the given era. + /// + /// For eras where paged exposure does not exist, this returns 1 to keep backward compatibility. + pub(crate) fn exposure_page_count(era: EraIndex, validator: &T::AccountId) -> Page { + >::get(&era, validator) + .map(|overview| { + if overview.page_count == 0 && overview.own > Zero::zero() { + // Even though there are no nominator pages, there is still validator's own + // stake exposed which needs to be paid out in a page. + 1 + } else { + overview.page_count + } + }) + // Always returns 1 page for older non-paged exposure. + // FIXME: Can be cleaned up with issue #13034. + .unwrap_or(1) + } + + /// Returns the next page that can be claimed or `None` if nothing to claim. 
+ pub(crate) fn get_next_claimable_page(era: EraIndex, validator: &T::AccountId) -> Option { + // Find next claimable page of paged exposure. + let page_count = Self::exposure_page_count(era, validator); + let all_claimable_pages: Vec = (0..page_count).collect(); + let claimed_pages = ErasClaimedRewards::::get(era, validator); + + all_claimable_pages.into_iter().find(|p| !claimed_pages.contains(p)) + } + + /// Creates an entry to track validator reward has been claimed for a given era and page. + /// Noop if already claimed. + pub(crate) fn set_rewards_as_claimed(era: EraIndex, validator: &T::AccountId, page: Page) { + let mut claimed_pages = ErasClaimedRewards::::get(era, validator); + + // this should never be called if the reward has already been claimed + if claimed_pages.contains(&page) { + defensive!("Trying to set an already claimed reward"); + // nevertheless don't do anything since the page already exist in claimed rewards. + return + } + + // add page to claimed entries + claimed_pages.push(page); + ErasClaimedRewards::::insert(era, validator, claimed_pages); + } + + /// Store exposure for elected validators at start of an era. + /// + /// If the exposure does not exist yet for the tuple (era, validator), it sets it. Otherwise, + /// it updates the existing record by ensuring *intermediate* exposure pages are filled up with + /// `T::MaxExposurePageSize` number of backers per page and the remaining exposures are added + /// to new exposure pages. 
+ pub fn upsert_exposure( + era: EraIndex, + validator: &T::AccountId, + mut exposure: Exposure>, + ) { + let page_size = T::MaxExposurePageSize::get().defensive_max(1); + + if let Some(stored_overview) = ErasStakersOverview::::get(era, &validator) { + let last_page_idx = stored_overview.page_count.saturating_sub(1); + + let mut last_page = + ErasStakersPaged::::get((era, validator, last_page_idx)).unwrap_or_default(); + let last_page_empty_slots = + T::MaxExposurePageSize::get().saturating_sub(last_page.others.len() as u32); + + // splits the exposure so that `exposures_append` will fit within the last exposure + // page, up to the max exposure page size. The remaining individual exposures in + // `exposure` will be added to new pages. + let exposures_append = exposure.split_others(last_page_empty_slots); + + ErasStakersOverview::::mutate(era, &validator, |stored| { + // new metadata is updated based on 3 different set of exposures: the + // current one, the exposure split to be "fitted" into the current last page and + // the exposure set that will be appended from the new page onwards. + let new_metadata = + stored.defensive_unwrap_or_default().update_with::( + [&exposures_append, &exposure] + .iter() + .fold(Default::default(), |total, expo| { + total.saturating_add(expo.total.saturating_sub(expo.own)) + }), + [&exposures_append, &exposure] + .iter() + .fold(Default::default(), |count, expo| { + count.saturating_add(expo.others.len() as u32) + }), + ); + *stored = new_metadata.into(); + }); + + // fill up last page with exposures. + last_page.page_total = last_page + .page_total + .saturating_add(exposures_append.total) + .saturating_sub(exposures_append.own); + last_page.others.extend(exposures_append.others); + ErasStakersPaged::::insert((era, &validator, last_page_idx), last_page); + + // now handle the remaining exposures and append the exposure pages. The metadata update + // has been already handled above. 
+ let (_, exposure_pages) = exposure.into_pages(page_size); + + exposure_pages.iter().enumerate().for_each(|(idx, paged_exposure)| { + let append_at = + (last_page_idx.saturating_add(1).saturating_add(idx as u32)) as Page; + >::insert((era, &validator, append_at), &paged_exposure); + }); + } else { + // expected page count is the number of nominators divided by the page size, rounded up. + let expected_page_count = exposure + .others + .len() + .defensive_saturating_add((page_size as usize).defensive_saturating_sub(1)) + .saturating_div(page_size as usize); + + // no exposures yet for this (era, validator) tuple, calculate paged exposure pages and + // metadata from a blank slate. + let (exposure_metadata, exposure_pages) = exposure.into_pages(page_size); + defensive_assert!(exposure_pages.len() == expected_page_count, "unexpected page count"); + + // insert metadata. + ErasStakersOverview::::insert(era, &validator, exposure_metadata); + + // insert validator's overview. + exposure_pages.iter().enumerate().for_each(|(idx, paged_exposure)| { + let append_at = idx as Page; + >::insert((era, &validator, append_at), &paged_exposure); + }); + }; + } + + pub(crate) fn set_validators_reward(era: EraIndex, amount: BalanceOf) { + ErasValidatorReward::::insert(era, amount); + } + + pub(crate) fn get_validators_reward(era: EraIndex) -> Option> { + ErasValidatorReward::::get(era) + } + + /// Update the total exposure for all the elected validators in the era. + pub(crate) fn add_total_stake(era: EraIndex, stake: BalanceOf) { + >::mutate(era, |total_stake| { + *total_stake += stake; + }); + } + + /// Check if the rewards for the given era and page index have been claimed. + pub(crate) fn is_rewards_claimed(era: EraIndex, validator: &T::AccountId, page: Page) -> bool { + ErasClaimedRewards::::get(era, validator).contains(&page) + } + + /// Add reward points to validators using their stash account ID. 
+ pub(crate) fn reward_active_era( + validators_points: impl IntoIterator, + ) { + if let Some(active_era) = ActiveEra::::get() { + >::mutate(active_era.index, |era_rewards| { + for (validator, points) in validators_points.into_iter() { + *era_rewards.individual.entry(validator).or_default() += points; + era_rewards.total += points; + } + }); + } + } + + pub(crate) fn get_reward_points(era: EraIndex) -> EraRewardPoints { + ErasRewardPoints::::get(era) + } +} + +#[cfg(any(feature = "try-runtime", test))] +impl Eras { + /// Ensure the given era is present, i.e. has not been pruned yet. + pub(crate) fn era_present(era: EraIndex) -> Result<(), sp_runtime::TryRuntimeError> { + // these two are only set if we have some validators in an era. + let e0 = ErasValidatorPrefs::::iter_prefix_values(era).count() != 0; + // note: we don't check `ErasStakersPaged` as a validator can have no backers. + let e1 = ErasStakersOverview::::iter_prefix_values(era).count() != 0; + assert_eq!(e0, e1, "ErasValidatorPrefs and ErasStakersOverview should be consistent"); + + // these two must always be set + let e2 = ErasTotalStake::::contains_key(era); + let e3 = ErasStartSessionIndex::::contains_key(era); + + let active_era = Rotator::::active_era(); + let e4 = if era.saturating_sub(1) > 0 && + era.saturating_sub(1) > active_era.saturating_sub(T::HistoryDepth::get() + 1) + { + // `ErasValidatorReward` is set at active era n for era n-1, and is not set for era 0 in + // our tests. Moreover, it cannot be checked for presence in the oldest present era + // (`active_era.saturating_sub(1)`) + ErasValidatorReward::::contains_key(era.saturating_sub(1)) + } else { + // ignore + e3 + }; + + assert!( + vec![e2, e3, e4].windows(2).all(|w| w[0] == w[1]), + "era info presence not consistent for era {}: {}, {}, {}", + era, + e2, + e3, + e4, + ); + + if e2 { + Ok(()) + } else { + Err("era presence mismatch".into()) + } + } + + /// Ensure the given era has indeed been already pruned. 
+ pub(crate) fn era_absent(era: EraIndex) -> Result<(), sp_runtime::TryRuntimeError> { + // check double+ maps + let e0 = ErasValidatorPrefs::::iter_prefix_values(era).count() != 0; + let e1 = ErasStakersPaged::::iter_prefix_values((era,)).count() != 0; + let e2 = ErasStakersOverview::::iter_prefix_values(era).count() != 0; + + // check maps + // `ErasValidatorReward` is set at active era n for era n-1 + let e3 = ErasValidatorReward::::contains_key(era); + let e4 = ErasTotalStake::::contains_key(era); + let e5 = ErasStartSessionIndex::::contains_key(era); + + // these two are only populated conditionally, so we only check them for lack of existence + let e6 = ErasClaimedRewards::::iter_prefix_values(era).count() != 0; + let e7 = ErasRewardPoints::::contains_key(era); + + assert!( + vec![e0, e1, e2, e3, e4, e5, e6, e7].windows(2).all(|w| w[0] == w[1]), + "era info absence not consistent for era {}: {}, {}, {}, {}, {}, {}, {}, {}", + era, + e0, + e1, + e2, + e3, + e4, + e5, + e6, + e7 + ); + + if !e0 { + Ok(()) + } else { + Err("era absence mismatch".into()) + } + } + + pub(crate) fn do_try_state() -> Result<(), sp_runtime::TryRuntimeError> { + // pruning window works. + let active_era = Rotator::::active_era(); + // we max with 1 as in active era 0 we don't do an election and therefore we don't have some + // of the maps populated. + let oldest_present_era = active_era.saturating_sub(T::HistoryDepth::get()).max(1); + let maybe_first_pruned_era = + active_era.saturating_sub(T::HistoryDepth::get()).checked_sub(One::one()); + + for e in oldest_present_era..=active_era { + Self::era_present(e)? + } + if let Some(first_pruned_era) = maybe_first_pruned_era { + Self::era_absent(first_pruned_era)?; + } + Ok(()) + } +} + +/// Manages session rotation logic. 
+/// +/// This controls the following storage items in FULL, meaning that they should not be accessed +/// directly from anywhere else in this pallet: +/// +/// * `CurrentEra`: The current planning era +/// * `ActiveEra`: The current active era +/// * `ErasStartSessionIndex`: The starting index of the active era +/// * `BondedEras`: the list of eras +pub struct Rotator(core::marker::PhantomData); + +impl Rotator { + #[cfg(feature = "runtime-benchmarks")] + pub(crate) fn legacy_insta_plan_era() -> Vec { + // Plan the era, + Self::plan_new_era(); + // signal that we are about to call into elect asap. + <::ElectionProvider as ElectionProvider>::asap(); + // immediately call into the election provider to fetch and process the results. We assume + // we are using an instant, onchain election here. + let msp = ::msp(); + let lsp = 0; + for p in (lsp..=msp).rev() { + EraElectionPlanner::::do_elect_paged(p); + } + + crate::ElectableStashes::::take().into_iter().collect() + } + + #[cfg(any(feature = "try-runtime", test))] + pub(crate) fn do_try_state() -> Result<(), sp_runtime::TryRuntimeError> { + // planned era can always be at most one more than active era + let planned = Self::planning_era(); + let active = Self::active_era(); + ensure!( + planned == active || planned == active + 1, + "planned era is always equal or one more than active" + ); + Ok(()) + } + + pub fn planning_era() -> EraIndex { + CurrentEra::::get().unwrap_or(0) + } + + pub fn active_era() -> EraIndex { + ActiveEra::::get().map(|a| a.index).defensive_unwrap_or(0) + } + + /// End the session and start the next one. + pub(crate) fn end_session(end_index: SessionIndex, activation_timestamp: Option<(u64, u32)>) { + let Some(active_era) = ActiveEra::::get() else { + defensive!("Active era must always be available."); + return; + }; + let current_planned_era = Self::planning_era(); + let starting = end_index + 1; + // the session after the starting session. 
+ let planning = starting + 1; + + log!( + info, + "Session: end {:?}, start {:?} (ts: {:?}), plan {:?}", + end_index, + starting, + activation_timestamp, + planning + ); + log!(info, "Era: active {:?}, planned {:?}", active_era.index, current_planned_era); + + match activation_timestamp { + Some((time, id)) if id == current_planned_era => { + // We rotate the era if we have the activation timestamp. + Self::start_era(active_era, starting, time); + }, + Some((_time, id)) => { + // RC has done something wrong -- we received the wrong ID. Don't start a new era. + crate::log!( + warn, + "received wrong ID with activation timestamp. Got {}, expected {}", + id, + current_planned_era + ); + }, + None => (), + } + + let active_era = Self::active_era(); + // check if we should plan new era. + let should_plan_era = match ForceEra::::get() { + // see if it's good time to plan a new era. + Forcing::NotForcing => Self::is_plan_era_deadline(starting, active_era), + // Force plan new era only once. + Forcing::ForceNew => { + ForceEra::::put(Forcing::NotForcing); + true + }, + // always plan the new era. + Forcing::ForceAlways => true, + // never force. + Forcing::ForceNone => false, + }; + + let has_pending_era = active_era < current_planned_era; + match (should_plan_era, has_pending_era) { + (false, _) => { + // nothing to consider + }, + (true, false) => { + // happy path + Self::plan_new_era(); + }, + (true, true) => { + // we are waiting for to start the previously planned era, we cannot plan a new era + // now. 
+ crate::log!( + debug, + "time to plan a new era {}, but waiting for the activation of the previous.", + current_planned_era + ); + }, + } + + Pallet::::deposit_event(Event::SessionRotated { + starting_session: starting, + active_era: Self::active_era(), + planned_era: Self::planning_era(), + }); + } + + pub(crate) fn start_era( + ending_era: ActiveEraInfo, + starting_session: SessionIndex, + new_era_start_timestamp: u64, + ) { + // verify that a new era was planned + debug_assert!(CurrentEra::::get().unwrap_or(0) == ending_era.index + 1); + + let starting_era = ending_era.index + 1; + + // finalize the ending era. + Self::end_era(&ending_era, new_era_start_timestamp); + + // start the next era. + Self::start_era_inc_active_era(new_era_start_timestamp); + Self::start_era_update_bonded_eras(starting_era, starting_session); + + // add the index to starting session so later we can compute the era duration in sessions. + ErasStartSessionIndex::::insert(starting_era, starting_session); + + // discard old era information that is no longer needed. + Self::cleanup_old_era(starting_era); + } + + fn start_era_inc_active_era(start_timestamp: u64) { + ActiveEra::::mutate(|active_era| { + let new_index = active_era.as_ref().map(|info| info.index + 1).unwrap_or(0); + log!( + debug, + "starting active era {:?} with RC-provided timestamp {:?}", + new_index, + start_timestamp + ); + *active_era = Some(ActiveEraInfo { index: new_index, start: Some(start_timestamp) }); + }); + } + + fn start_era_update_bonded_eras(starting_era: EraIndex, start_session: SessionIndex) { + let bonding_duration = T::BondingDuration::get(); + + BondedEras::::mutate(|bonded| { + bonded.push((starting_era, start_session)); + + if starting_era > bonding_duration { + let first_kept = starting_era.defensive_saturating_sub(bonding_duration); + + // Prune out everything that's from before the first-kept index. 
+ let n_to_prune = + bonded.iter().take_while(|&&(era_idx, _)| era_idx < first_kept).count(); + + // Kill slashing metadata. + for (pruned_era, _) in bonded.drain(..n_to_prune) { + slashing::clear_era_metadata::(pruned_era); + } + } + }); + } + + fn end_era(ending_era: &ActiveEraInfo, new_era_start: u64) { + let previous_era_start = ending_era.start.defensive_unwrap_or(new_era_start); + let era_duration = new_era_start.saturating_sub(previous_era_start); + Self::end_era_compute_payout(ending_era, era_duration); + } + + fn end_era_compute_payout(ending_era: &ActiveEraInfo, era_duration: u64) { + let staked = ErasTotalStake::::get(ending_era.index); + let issuance = asset::total_issuance::(); + + log!( + debug, + "computing inflation for era {:?} with duration {:?}", + ending_era.index, + era_duration + ); + let (validator_payout, remainder) = + T::EraPayout::era_payout(staked, issuance, era_duration); + + let total_payout = validator_payout.saturating_add(remainder); + let max_staked_rewards = MaxStakedRewards::::get().unwrap_or(Percent::from_percent(100)); + + // apply cap to validators payout and add difference to remainder. + let validator_payout = validator_payout.min(max_staked_rewards * total_payout); + let remainder = total_payout.saturating_sub(validator_payout); + + Pallet::::deposit_event(Event::::EraPaid { + era_index: ending_era.index, + validator_payout, + remainder, + }); + + // Set ending era reward. + Eras::::set_validators_reward(ending_era.index, validator_payout); + T::RewardRemainder::on_unbalanced(asset::issue::(remainder)); + } + + /// Plans a new era by kicking off the election process. + /// + /// The newly planned era is targeted to activate in the next session. 
+	fn plan_new_era() {
+		let _ = CurrentEra::<T>::try_mutate(|x| {
+			log!(debug, "Planning new era: {:?}, sending election start signal", x.unwrap_or(0));
+			let could_start_election = EraElectionPlanner::<T>::plan_new_election();
+			*x = Some(x.unwrap_or(0) + 1);
+			could_start_election
+		});
+	}
+
+	/// Returns whether we are at the session where we should plan the new era.
+	fn is_plan_era_deadline(start_session: SessionIndex, active_era: EraIndex) -> bool {
+		let planning_era_offset = T::PlanningEraOffset::get().min(T::SessionsPerEra::get());
+		// session at which we should plan the new era.
+		let target_plan_era_session = T::SessionsPerEra::get().saturating_sub(planning_era_offset);
+		let era_start_session = ErasStartSessionIndex::<T>::get(&active_era).unwrap_or(0);
+
+		// progress of the active era in sessions.
+		let session_progress =
+			start_session.saturating_add(1).defensive_saturating_sub(era_start_session);
+
+		log!(
+			debug,
+			"Session progress within era: {:?}, target_plan_era_session: {:?}",
+			session_progress,
+			target_plan_era_session
+		);
+		session_progress >= target_plan_era_session
+	}
+
+	fn cleanup_old_era(starting_era: EraIndex) {
+		EraElectionPlanner::<T>::cleanup();
+
+		// discard the ancient era info.
+		if let Some(old_era) = starting_era.checked_sub(T::HistoryDepth::get() + 1) {
+			log!(debug, "Removing era information for {:?}", old_era);
+			Eras::<T>::prune_era(old_era);
+		}
+	}
+}
+
+/// Manager type which collects the election results from [`Config::ElectionProvider`] and
+/// finalizes the planning of a new era.
+///
+/// This type manages 3 storage items:
+///
+/// * [`crate::VoterSnapshotStatus`]
+/// * [`crate::NextElectionPage`]
+/// * [`crate::ElectableStashes`]
+///
+/// A new election is fetched over multiple pages, and finalized upon fetching the last page.
+///
+/// * The intermediate state of fetching the election result is kept in [`NextElectionPage`]. If
+///   `Some(_)` something is ongoing, otherwise not.
+/// * We fully trust [`Config::ElectionProvider`] to give us a full set of validators, with enough +/// backing after all calls to `maybe_fetch_election_results` are done. Note that older versions +/// of this pallet had a `MinimumValidatorCount` to double-check this, but we don't check it +/// anymore. +/// * `maybe_fetch_election_results` returns no weight. Its weight should be taken account in the +/// e2e benchmarking of the [`Config::ElectionProvider`]. +/// +/// TODOs: +/// +/// * Add a try-state check based on the 3 storage items +/// * Move snapshot creation functions here as well. +pub(crate) struct EraElectionPlanner(PhantomData); +impl EraElectionPlanner { + /// Cleanup all associated storage items. + pub(crate) fn cleanup() { + VoterSnapshotStatus::::kill(); + NextElectionPage::::kill(); + ElectableStashes::::kill(); + Pallet::::register_weight(T::DbWeight::get().writes(3)); + } + + /// Fetches the number of pages configured by the election provider. + pub(crate) fn election_pages() -> u32 { + <::ElectionProvider as ElectionProvider>::Pages::get() + } + + /// Plan a new election + pub(crate) fn plan_new_election() -> Result<(), ::Error> + { + T::ElectionProvider::start() + .inspect_err(|e| log!(warn, "Election provider failed to start: {:?}", e)) + } + + /// Hook to be used in the pallet's on-initialize. + pub(crate) fn maybe_fetch_election_results() { + if let Ok(true) = T::ElectionProvider::status() { + crate::log!( + debug, + "Election provider is ready, our status is {:?}", + NextElectionPage::::get() + ); + + debug_assert!( + CurrentEra::::get().unwrap_or(0) == + ActiveEra::::get().map_or(0, |a| a.index) + 1, + "Next era must be already planned." 
+ ); + + let current_page = NextElectionPage::::get() + .unwrap_or(Self::election_pages().defensive_saturating_sub(1)); + let maybe_next_page = current_page.checked_sub(1); + crate::log!(debug, "fetching page {:?}, next {:?}", current_page, maybe_next_page); + + Self::do_elect_paged(current_page); + NextElectionPage::::set(maybe_next_page); + + // if current page was `Some`, and next is `None`, we have finished an election and + // we can report it now. + if maybe_next_page.is_none() { + use pallet_staking_async_rc_client::RcClientInterface; + let id = CurrentEra::::get().defensive_unwrap_or(0); + let prune_up_to = Self::get_prune_up_to(); + + crate::log!( + info, + "Send new validator set to RC. ID: {:?}, prune_up_to: {:?}", + id, + prune_up_to + ); + + T::RcClientInterface::validator_set( + ElectableStashes::::take().into_iter().collect(), + id, + prune_up_to, + ); + } + } + } + + /// Get the right value of the first session that needs to be pruned on the RC's historical + /// session pallet. + fn get_prune_up_to() -> Option { + let bonded_eras = BondedEras::::get(); + + // get the first session of the oldest era in the bonded eras. + if (bonded_eras.len() as u32) < T::BondingDuration::get() { + None + } else { + Some(bonded_eras.first().map(|(_, first_session)| *first_session).unwrap_or(0)) + } + } + + /// Paginated elect. + /// + /// Fetches the election page with index `page` from the election provider. + /// + /// The results from the elect call should be stored in the `ElectableStashes` storage. In + /// addition, it stores stakers' information for next planned era based on the paged + /// solution data returned. + /// + /// If any new election winner does not fit in the electable stashes storage, it truncates + /// the result of the election. We ensure that only the winners that are part of the + /// electable stashes have exposures collected for the next era. 
+ pub(crate) fn do_elect_paged(page: PageIndex) { + let election_result = T::ElectionProvider::elect(page); + match election_result { + Ok(supports) => { + let inner_processing_results = Self::do_elect_paged_inner(supports); + if let Err(not_included) = inner_processing_results { + defensive!( + "electable stashes exceeded limit, unexpected but election proceeds.\ + {} stashes from election result discarded", + not_included + ); + }; + + Pallet::::deposit_event(Event::PagedElectionProceeded { + page, + result: inner_processing_results.map(|x| x as u32).map_err(|x| x as u32), + }); + }, + Err(e) => { + log!(warn, "election provider page failed due to {:?} (page: {})", e, page); + Pallet::::deposit_event(Event::PagedElectionProceeded { page, result: Err(0) }); + }, + } + } + + /// Inner implementation of [`Self::do_elect_paged`]. + /// + /// Returns an error if adding election winners to the electable stashes storage fails due + /// to exceeded bounds. In case of error, it returns the index of the first stash that + /// failed to be included. + pub(crate) fn do_elect_paged_inner( + mut supports: BoundedSupportsOf, + ) -> Result { + let planning_era = Rotator::::planning_era(); + + match Self::add_electables(supports.iter().map(|(s, _)| s.clone())) { + Ok(added) => { + let exposures = Self::collect_exposures(supports); + let _ = Self::store_stakers_info(exposures, planning_era); + Ok(added) + }, + Err(not_included_idx) => { + let not_included = supports.len().saturating_sub(not_included_idx); + + log!( + warn, + "not all winners fit within the electable stashes, excluding {:?} accounts from solution.", + not_included, + ); + + // filter out supports of stashes that do not fit within the electable stashes + // storage bounds to prevent collecting their exposures. 
+ supports.truncate(not_included_idx); + let exposures = Self::collect_exposures(supports); + let _ = Self::store_stakers_info(exposures, planning_era); + + Err(not_included) + }, + } + } + + /// Process the output of a paged election. + /// + /// Store staking information for the new planned era of a single election page. + pub(crate) fn store_stakers_info( + exposures: BoundedExposuresOf, + new_planned_era: EraIndex, + ) -> BoundedVec> { + // populate elected stash, stakers, exposures, and the snapshot of validator prefs. + let mut total_stake_page: BalanceOf = Zero::zero(); + let mut elected_stashes_page = Vec::with_capacity(exposures.len()); + let mut total_backers = 0u32; + + exposures.into_iter().for_each(|(stash, exposure)| { + log!( + trace, + "stored exposure for stash {:?} and {:?} backers", + stash, + exposure.others.len() + ); + // build elected stash. + elected_stashes_page.push(stash.clone()); + // accumulate total stake. + total_stake_page = total_stake_page.saturating_add(exposure.total); + // set or update staker exposure for this era. + total_backers += exposure.others.len() as u32; + Eras::::upsert_exposure(new_planned_era, &stash, exposure); + }); + + let elected_stashes: BoundedVec<_, MaxWinnersPerPageOf> = + elected_stashes_page + .try_into() + .expect("both types are bounded by MaxWinnersPerPageOf; qed"); + + // adds to total stake in this era. + Eras::::add_total_stake(new_planned_era, total_stake_page); + + // collect or update the pref of all winners. + for stash in &elected_stashes { + let pref = Validators::::get(stash); + Eras::::set_validator_prefs(new_planned_era, stash, pref); + } + + log!( + info, + "stored a page of stakers with {:?} validators and {:?} total backers for era {:?}", + elected_stashes.len(), + total_backers, + new_planned_era, + ); + + elected_stashes + } + + /// Consume a set of [`BoundedSupports`] from [`sp_npos_elections`] and collect them into a + /// [`Exposure`]. 
+ /// + /// Returns vec of all the exposures of a validator in `paged_supports`, bounded by the + /// number of max winners per page returned by the election provider. + fn collect_exposures( + supports: BoundedSupportsOf, + ) -> BoundedExposuresOf { + let total_issuance = asset::total_issuance::(); + let to_currency = |e: frame_election_provider_support::ExtendedBalance| { + T::CurrencyToVote::to_currency(e, total_issuance) + }; + + supports + .into_iter() + .map(|(validator, support)| { + // Build `struct exposure` from `support`. + let mut others = Vec::with_capacity(support.voters.len()); + let mut own: BalanceOf = Zero::zero(); + let mut total: BalanceOf = Zero::zero(); + support + .voters + .into_iter() + .map(|(nominator, weight)| (nominator, to_currency(weight))) + .for_each(|(nominator, stake)| { + if nominator == validator { + defensive_assert!(own == Zero::zero(), "own stake should be unique"); + own = own.saturating_add(stake); + } else { + others.push(IndividualExposure { who: nominator, value: stake }); + } + total = total.saturating_add(stake); + }); + + let exposure = Exposure { own, others, total }; + (validator, exposure) + }) + .try_collect() + .expect("we only map through support vector which cannot change the size; qed") + } + + /// Adds a new set of stashes to the electable stashes. + /// + /// Returns: + /// + /// `Ok(newly_added)` if all stashes were added successfully. + /// `Err(first_un_included)` if some stashes cannot be added due to bounds. 
+ pub(crate) fn add_electables( + new_stashes: impl Iterator, + ) -> Result { + ElectableStashes::::mutate(|electable| { + let pre_size = electable.len(); + + for (idx, stash) in new_stashes.enumerate() { + if electable.try_insert(stash).is_err() { + return Err(idx); + } + } + + Ok(electable.len() - pre_size) + }) + } +} diff --git a/substrate/frame/staking-async/src/slashing.rs b/substrate/frame/staking-async/src/slashing.rs new file mode 100644 index 0000000000000..671f1a81ae32f --- /dev/null +++ b/substrate/frame/staking-async/src/slashing.rs @@ -0,0 +1,1037 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! A slashing implementation for NPoS systems. +//! +//! For the purposes of the economic model, it is easiest to think of each validator as a nominator +//! which nominates only its own identity. +//! +//! The act of nomination signals intent to unify economic identity with the validator - to take +//! part in the rewards of a job well done, and to take part in the punishment of a job done badly. +//! +//! There are 3 main difficulties to account for with slashing in NPoS: +//! - A nominator can nominate multiple validators and be slashed via any of them. +//! - Until slashed, stake is reused from era to era. Nominating with N coins for E eras in a row +//! 
does not mean you have N*E coins to be slashed - you've only ever had N. +//! - Slashable offences can be found after the fact and out of order. +//! +//! The algorithm implemented in this module tries to balance these 3 difficulties. +//! +//! First, we only slash participants for the _maximum_ slash they receive in some time period, +//! rather than the sum. This ensures a protection from overslashing. +//! +//! Second, we do not want the time period (or "span") that the maximum is computed +//! over to last indefinitely. That would allow participants to begin acting with +//! impunity after some point, fearing no further repercussions. For that reason, we +//! automatically "chill" validators and withdraw a nominator's nomination after a slashing event, +//! requiring them to re-enlist voluntarily (acknowledging the slash) and begin a new +//! slashing span. +//! +//! Typically, you will have a single slashing event per slashing span. Only in the case +//! where a validator releases many misbehaviors at once, or goes "back in time" to misbehave in +//! eras that have already passed, would you encounter situations where a slashing span +//! has multiple misbehaviors. However, accounting for such cases is necessary +//! to deter a class of "rage-quit" attacks. +//! +//! 
Based on research at + +use crate::{ + asset, log, session_rotation::Eras, BalanceOf, Config, Error, NegativeImbalanceOf, + NominatorSlashInEra, OffenceQueue, OffenceQueueEras, PagedExposure, Pallet, Perbill, + ProcessingOffence, SlashRewardFraction, SpanSlash, UnappliedSlash, UnappliedSlashes, + ValidatorSlashInEra, WeightInfo, +}; +use alloc::vec::Vec; +use codec::{Decode, Encode, MaxEncodedLen}; +use frame_support::{ + ensure, + traits::{Defensive, DefensiveSaturating, Get, Imbalance, OnUnbalanced}, +}; +use scale_info::TypeInfo; +use sp_runtime::{ + traits::{Saturating, Zero}, + DispatchResult, RuntimeDebug, WeakBoundedVec, Weight, +}; +use sp_staking::{EraIndex, StakingInterface}; + +/// The proportion of the slashing reward to be paid out on the first slashing detection. +/// This is f_1 in the paper. +const REWARD_F1: Perbill = Perbill::from_percent(50); + +/// The index of a slashing span - unique to each stash. +pub type SpanIndex = u32; + +// A range of start..end eras for a slashing span. +#[derive(Encode, Decode, TypeInfo)] +#[cfg_attr(test, derive(Debug, PartialEq))] +pub(crate) struct SlashingSpan { + pub(crate) index: SpanIndex, + pub(crate) start: EraIndex, + pub(crate) length: Option, // the ongoing slashing span has indeterminate length. +} + +impl SlashingSpan { + fn contains_era(&self, era: EraIndex) -> bool { + self.start <= era && self.length.map_or(true, |l| self.start.saturating_add(l) > era) + } +} + +/// An encoding of all of a nominator's slashing spans. +#[derive(Encode, Decode, RuntimeDebug, TypeInfo)] +pub struct SlashingSpans { + // the index of the current slashing span of the nominator. different for + // every stash, resets when the account hits free balance 0. + span_index: SpanIndex, + // the start era of the most recent (ongoing) slashing span. + last_start: EraIndex, + // the last era at which a non-zero slash occurred. 
+ last_nonzero_slash: EraIndex, + // all prior slashing spans' start indices, in reverse order (most recent first) + // encoded as offsets relative to the slashing span after it. + prior: Vec, +} + +impl SlashingSpans { + // creates a new record of slashing spans for a stash, starting at the beginning + // of the bonding period, relative to now. + pub(crate) fn new(window_start: EraIndex) -> Self { + SlashingSpans { + span_index: 0, + last_start: window_start, + // initialize to zero, as this structure is lazily created until + // the first slash is applied. setting equal to `window_start` would + // put a time limit on nominations. + last_nonzero_slash: 0, + prior: Vec::new(), + } + } + + // update the slashing spans to reflect the start of a new span at the era after `now` + // returns `true` if a new span was started, `false` otherwise. `false` indicates + // that internal state is unchanged. + pub(crate) fn end_span(&mut self, now: EraIndex) -> bool { + let next_start = now.defensive_saturating_add(1); + if next_start <= self.last_start { + return false + } + + let last_length = next_start.defensive_saturating_sub(self.last_start); + self.prior.insert(0, last_length); + self.last_start = next_start; + self.span_index.defensive_saturating_accrue(1); + true + } + + // an iterator over all slashing spans in _reverse_ order - most recent first. + pub(crate) fn iter(&'_ self) -> impl Iterator + '_ { + let mut last_start = self.last_start; + let mut index = self.span_index; + let last = SlashingSpan { index, start: last_start, length: None }; + let prior = self.prior.iter().cloned().map(move |length| { + let start = last_start.defensive_saturating_sub(length); + last_start = start; + index.defensive_saturating_reduce(1); + + SlashingSpan { index, start, length: Some(length) } + }); + + core::iter::once(last).chain(prior) + } + + /// Yields the era index where the most recent non-zero slash occurred. 
+ pub fn last_nonzero_slash(&self) -> EraIndex { + self.last_nonzero_slash + } + + // prune the slashing spans against a window, whose start era index is given. + // + // If this returns `Some`, then it includes a range start..end of all the span + // indices which were pruned. + fn prune(&mut self, window_start: EraIndex) -> Option<(SpanIndex, SpanIndex)> { + let old_idx = self + .iter() + .skip(1) // skip ongoing span. + .position(|span| { + span.length + .map_or(false, |len| span.start.defensive_saturating_add(len) <= window_start) + }); + + let earliest_span_index = + self.span_index.defensive_saturating_sub(self.prior.len() as SpanIndex); + let pruned = match old_idx { + Some(o) => { + self.prior.truncate(o); + let new_earliest = + self.span_index.defensive_saturating_sub(self.prior.len() as SpanIndex); + Some((earliest_span_index, new_earliest)) + }, + None => None, + }; + + // readjust the ongoing span, if it started before the beginning of the window. + self.last_start = core::cmp::max(self.last_start, window_start); + pruned + } +} + +/// A slashing-span record for a particular stash. +#[derive(Encode, Decode, Default, TypeInfo, MaxEncodedLen)] +pub(crate) struct SpanRecord { + slashed: Balance, + paid_out: Balance, +} + +impl SpanRecord { + /// The value of stash balance slashed in this span. + #[cfg(test)] + pub(crate) fn amount(&self) -> &Balance { + &self.slashed + } +} + +/// Parameters for performing a slash. +#[derive(Clone)] +pub(crate) struct SlashParams<'a, T: 'a + Config> { + /// The stash account being slashed. + pub(crate) stash: &'a T::AccountId, + /// The proportion of the slash. + pub(crate) slash: Perbill, + /// The prior slash proportion of the validator if the validator has been reported multiple + /// times in the same era, and a new greater slash replaces the old one. + /// Invariant: slash > prior_slash + pub(crate) prior_slash: Perbill, + /// The exposure of the stash and all nominators. 
+ pub(crate) exposure: &'a PagedExposure>, + /// The era where the offence occurred. + pub(crate) slash_era: EraIndex, + /// The first era in the current bonding period. + pub(crate) window_start: EraIndex, + /// The current era. + pub(crate) now: EraIndex, + /// The maximum percentage of a slash that ever gets paid out. + /// This is f_inf in the paper. + pub(crate) reward_proportion: Perbill, +} + +/// Represents an offence record within the staking system, capturing details about a slashing +/// event. +#[derive(Clone, Encode, Decode, TypeInfo, MaxEncodedLen, PartialEq, RuntimeDebug)] +pub struct OffenceRecord { + /// The account ID of the entity that reported the offence. + pub reporter: Option, + + /// Era at which the offence was reported. + pub reported_era: EraIndex, + + /// The specific page of the validator's exposure currently being processed. + /// + /// Since a validator's total exposure can span multiple pages, this field serves as a pointer + /// to the current page being evaluated. The processing order starts from the last page + /// and moves backward, decrementing this value with each processed page. + /// + /// This ensures that all pages are systematically handled, and it helps track when + /// the entire exposure has been processed. + pub exposure_page: u32, + + /// The fraction of the validator's stake to be slashed for this offence. + pub slash_fraction: Perbill, + + /// The previous slash fraction of the validator's stake before being updated. + /// If a new, higher slash fraction is reported, this field stores the prior fraction + /// that was overwritten. This helps in tracking changes in slashes across multiple reports for + /// the same era. + pub prior_slash_fraction: Perbill, +} + +/// Loads next offence in the processing offence and returns the offense record to be processed. 
+/// +/// Note: this can mutate the following storage +/// - `ProcessingOffence` +/// - `OffenceQueue` +/// - `OffenceQueueEras` +fn next_offence() -> Option<(EraIndex, T::AccountId, OffenceRecord)> { + let maybe_processing_offence = ProcessingOffence::::get(); + + if let Some((offence_era, offender, offence_record)) = maybe_processing_offence { + // If the exposure page is 0, then the offence has been processed. + if offence_record.exposure_page == 0 { + ProcessingOffence::::kill(); + return Some((offence_era, offender, offence_record)) + } + + // Update the next page. + ProcessingOffence::::put(( + offence_era, + &offender, + OffenceRecord { + // decrement the page index. + exposure_page: offence_record.exposure_page.defensive_saturating_sub(1), + ..offence_record.clone() + }, + )); + + return Some((offence_era, offender, offence_record)) + } + + // Nothing in processing offence. Try to enqueue the next offence. + let Some(mut eras) = OffenceQueueEras::::get() else { return None }; + let Some(&oldest_era) = eras.first() else { return None }; + + let mut offence_iter = OffenceQueue::::iter_prefix(oldest_era); + let next_offence = offence_iter.next(); + + if let Some((ref validator, ref offence_record)) = next_offence { + // Update the processing offence if the offence is multi-page. + if offence_record.exposure_page > 0 { + // update processing offence with the next page. + ProcessingOffence::::put(( + oldest_era, + validator.clone(), + OffenceRecord { + exposure_page: offence_record.exposure_page.defensive_saturating_sub(1), + ..offence_record.clone() + }, + )); + } + + // Remove from `OffenceQueue` + OffenceQueue::::remove(oldest_era, &validator); + } + + // If there are no offences left for the era, remove the era from `OffenceQueueEras`. + if offence_iter.next().is_none() { + if eras.len() == 1 { + // If there is only one era left, remove the entire queue. 
+				OffenceQueueEras::<T>::kill();
+			} else {
+				// Remove the oldest era
+				eras.remove(0);
+				OffenceQueueEras::<T>::put(eras);
+			}
+		}
+
+	next_offence.map(|(v, o)| (oldest_era, v, o))
+}
+
+/// Infallible function to process an offence.
+pub(crate) fn process_offence<T: Config>() -> Weight {
+	// We do manual weight tracking for early-returns, and use benchmarks for the final two branches.
+	let mut incomplete_consumed_weight = Weight::from_parts(0, 0);
+	let mut add_db_reads_writes = |reads, writes| {
+		incomplete_consumed_weight += T::DbWeight::get().reads_writes(reads, writes);
+	};
+
+	add_db_reads_writes(1, 1);
+	let Some((offence_era, offender, offence_record)) = next_offence::<T>() else {
+		return incomplete_consumed_weight
+	};
+
+	log!(
+		debug,
+		"🦹 Processing offence for {:?} in era {:?} with slash fraction {:?}",
+		offender,
+		offence_era,
+		offence_record.slash_fraction,
+	);
+
+	add_db_reads_writes(1, 0);
+	let reward_proportion = SlashRewardFraction::<T>::get();
+
+	add_db_reads_writes(2, 0);
+	let Some(exposure) =
+		Eras::<T>::get_paged_exposure(offence_era, &offender, offence_record.exposure_page)
+	else {
+		// this can only happen if the offence was valid at the time of reporting but became too old
+		// at the time of computing and should be discarded.
+ return incomplete_consumed_weight + }; + + let slash_page = offence_record.exposure_page; + let slash_defer_duration = T::SlashDeferDuration::get(); + let slash_era = offence_era.saturating_add(slash_defer_duration); + let window_start = offence_record.reported_era.saturating_sub(T::BondingDuration::get()); + + add_db_reads_writes(3, 3); + let Some(mut unapplied) = compute_slash::(SlashParams { + stash: &offender, + slash: offence_record.slash_fraction, + prior_slash: offence_record.prior_slash_fraction, + exposure: &exposure, + slash_era: offence_era, + window_start, + now: offence_record.reported_era, + reward_proportion, + }) else { + log!( + debug, + "🦹 Slash of {:?}% happened in {:?} (reported in {:?}) is discarded, as could not compute slash", + offence_record.slash_fraction, + offence_era, + offence_record.reported_era, + ); + // No slash to apply. Discard. + return incomplete_consumed_weight + }; + + >::deposit_event(super::Event::::SlashComputed { + offence_era, + slash_era, + offender: offender.clone(), + page: slash_page, + }); + + log!( + debug, + "🦹 Slash of {:?}% happened in {:?} (reported in {:?}) is computed", + offence_record.slash_fraction, + offence_era, + offence_record.reported_era, + ); + + // add the reporter to the unapplied slash. + unapplied.reporter = offence_record.reporter; + + if slash_defer_duration == 0 { + // Apply right away. + log!( + debug, + "🦹 applying slash instantly of {:?}% happened in {:?} (reported in {:?}) to {:?}", + offence_record.slash_fraction, + offence_era, + offence_record.reported_era, + offender, + ); + + apply_slash::(unapplied, offence_era); + T::WeightInfo::apply_slash().saturating_add(T::WeightInfo::process_offence_queue()) + } else { + // Historical Note: Previously, with BondingDuration = 28 and SlashDeferDuration = 27, + // slashes were applied at the start of the 28th era from `offence_era`. + // However, with paged slashing, applying slashes now takes multiple blocks. 
+		// To account for this delay, slashes are now applied at the start of the 27th era from
+		// `offence_era`.
+		log!(
+			debug,
+			"🦹 deferring slash of {:?}% happened in {:?} (reported in {:?}) to {:?}",
+			offence_record.slash_fraction,
+			offence_era,
+			offence_record.reported_era,
+			slash_era,
+		);
+		UnappliedSlashes::<T>::insert(
+			slash_era,
+			(offender, offence_record.slash_fraction, slash_page),
+			unapplied,
+		);
+		T::WeightInfo::process_offence_queue()
+	}
+}
+
+/// Computes a slash of a validator and nominators. It returns an unapplied
+/// record to be applied at some later point. Slashing metadata is updated in storage,
+/// since unapplied records are only rarely intended to be dropped.
+///
+/// The pending slash record returned does not have initialized reporters. Those have
+/// to be set at a higher level, if any.
+///
+/// If `nominators_only` is set to `true`, only the nominator slashes will be computed.
+pub(crate) fn compute_slash<T: Config>(params: SlashParams<T>) -> Option<UnappliedSlash<T>> {
+	let (val_slashed, mut reward_payout) = slash_validator::<T>(params.clone());
+
+	let mut nominators_slashed = Vec::new();
+	let (nom_slashed, nom_reward_payout) =
+		slash_nominators::<T>(params.clone(), &mut nominators_slashed);
+	reward_payout += nom_reward_payout;
+
+	(nom_slashed + val_slashed > Zero::zero()).then_some(UnappliedSlash {
+		validator: params.stash.clone(),
+		own: val_slashed,
+		others: WeakBoundedVec::force_from(
+			nominators_slashed,
+			Some("slashed nominators not expected to be larger than the bounds"),
+		),
+		reporter: None,
+		payout: reward_payout,
+	})
+}
+
+// doesn't apply any slash, but kicks out the validator if the misbehavior is from
+// the most recent slashing span.
+fn kick_out_if_recent<T: Config>(params: SlashParams<T>) {
+	// these are not updated by era-span or end-span.
+ let mut reward_payout = Zero::zero(); + let mut val_slashed = Zero::zero(); + let mut spans = fetch_spans::( + params.stash, + params.window_start, + &mut reward_payout, + &mut val_slashed, + params.reward_proportion, + ); + + if spans.era_span(params.slash_era).map(|s| s.index) == Some(spans.span_index()) { + // Check https://github.com/paritytech/polkadot-sdk/issues/2650 for details + spans.end_span(params.now); + } +} + +/// Compute the slash for a validator. Returns the amount slashed and the reward payout. +fn slash_validator(params: SlashParams) -> (BalanceOf, BalanceOf) { + let own_slash = params.slash * params.exposure.exposure_metadata.own; + log!( + warn, + "🦹 slashing validator {:?} of stake: {:?} with {:?}% for {:?} in era {:?}", + params.stash, + params.exposure.exposure_metadata.own, + params.slash, + own_slash, + params.slash_era, + ); + + if own_slash == Zero::zero() { + // kick out the validator even if they won't be slashed, + // as long as the misbehavior is from their most recent slashing span. + kick_out_if_recent::(params); + return (Zero::zero(), Zero::zero()) + } + + // apply slash to validator. + let mut reward_payout = Zero::zero(); + let mut val_slashed = Zero::zero(); + + { + let mut spans = fetch_spans::( + params.stash, + params.window_start, + &mut reward_payout, + &mut val_slashed, + params.reward_proportion, + ); + + let target_span = spans.compare_and_update_span_slash(params.slash_era, own_slash); + + if target_span == Some(spans.span_index()) { + // misbehavior occurred within the current slashing span - end current span. + // Check for details. + spans.end_span(params.now); + } + } + + (val_slashed, reward_payout) +} + +/// Slash nominators. Accepts general parameters and the prior slash percentage of the validator. +/// +/// Returns the total amount slashed and amount of reward to pay out. 
+fn slash_nominators( + params: SlashParams, + nominators_slashed: &mut Vec<(T::AccountId, BalanceOf)>, +) -> (BalanceOf, BalanceOf) { + let mut reward_payout = BalanceOf::::zero(); + let mut total_slashed = BalanceOf::::zero(); + + nominators_slashed.reserve(params.exposure.exposure_page.others.len()); + for nominator in ¶ms.exposure.exposure_page.others { + let stash = &nominator.who; + let mut nom_slashed = Zero::zero(); + + // the era slash of a nominator always grows, if the validator had a new max slash for the + // era. + let era_slash = { + let own_slash_prior = params.prior_slash * nominator.value; + let own_slash_by_validator = params.slash * nominator.value; + let own_slash_difference = own_slash_by_validator.saturating_sub(own_slash_prior); + + let mut era_slash = + NominatorSlashInEra::::get(¶ms.slash_era, stash).unwrap_or_else(Zero::zero); + era_slash += own_slash_difference; + NominatorSlashInEra::::insert(¶ms.slash_era, stash, &era_slash); + + era_slash + }; + + // compare the era slash against other eras in the same span. + { + let mut spans = fetch_spans::( + stash, + params.window_start, + &mut reward_payout, + &mut nom_slashed, + params.reward_proportion, + ); + + let target_span = spans.compare_and_update_span_slash(params.slash_era, era_slash); + + if target_span == Some(spans.span_index()) { + // end the span, but don't chill the nominator. + spans.end_span(params.now); + } + } + nominators_slashed.push((stash.clone(), nom_slashed)); + total_slashed.saturating_accrue(nom_slashed); + } + + (total_slashed, reward_payout) +} + +// helper struct for managing a set of spans we are currently inspecting. +// writes alterations to disk on drop, but only if a slash has been carried out. +// +// NOTE: alterations to slashing metadata should not be done after this is dropped. 
+// dropping this struct applies any necessary slashes, which can lead to free balance +// being 0, and the account being garbage-collected -- a dead account should get no new +// metadata. +struct InspectingSpans<'a, T: Config + 'a> { + dirty: bool, + window_start: EraIndex, + stash: &'a T::AccountId, + spans: SlashingSpans, + paid_out: &'a mut BalanceOf, + slash_of: &'a mut BalanceOf, + reward_proportion: Perbill, + _marker: core::marker::PhantomData, +} + +// fetches the slashing spans record for a stash account, initializing it if necessary. +fn fetch_spans<'a, T: Config + 'a>( + stash: &'a T::AccountId, + window_start: EraIndex, + paid_out: &'a mut BalanceOf, + slash_of: &'a mut BalanceOf, + reward_proportion: Perbill, +) -> InspectingSpans<'a, T> { + let spans = crate::SlashingSpans::::get(stash).unwrap_or_else(|| { + let spans = SlashingSpans::new(window_start); + crate::SlashingSpans::::insert(stash, &spans); + spans + }); + + InspectingSpans { + dirty: false, + window_start, + stash, + spans, + slash_of, + paid_out, + reward_proportion, + _marker: core::marker::PhantomData, + } +} + +impl<'a, T: 'a + Config> InspectingSpans<'a, T> { + fn span_index(&self) -> SpanIndex { + self.spans.span_index + } + + fn end_span(&mut self, now: EraIndex) { + self.dirty = self.spans.end_span(now) || self.dirty; + } + + // add some value to the slash of the staker. + // invariant: the staker is being slashed for non-zero value here + // although `amount` may be zero, as it is only a difference. + fn add_slash(&mut self, amount: BalanceOf, slash_era: EraIndex) { + *self.slash_of += amount; + self.spans.last_nonzero_slash = core::cmp::max(self.spans.last_nonzero_slash, slash_era); + } + + // find the span index of the given era, if covered. + fn era_span(&self, era: EraIndex) -> Option { + self.spans.iter().find(|span| span.contains_era(era)) + } + + // compares the slash in an era to the overall current span slash. 
+ // if it's higher, applies the difference of the slashes and then updates the span on disk. + // + // returns the span index of the era where the slash occurred, if any. + fn compare_and_update_span_slash( + &mut self, + slash_era: EraIndex, + slash: BalanceOf, + ) -> Option { + let target_span = self.era_span(slash_era)?; + let span_slash_key = (self.stash.clone(), target_span.index); + let mut span_record = SpanSlash::::get(&span_slash_key); + let mut changed = false; + + let reward = if span_record.slashed < slash { + // new maximum span slash. apply the difference. + let difference = slash.defensive_saturating_sub(span_record.slashed); + span_record.slashed = slash; + + // compute reward. + let reward = + REWARD_F1 * (self.reward_proportion * slash).saturating_sub(span_record.paid_out); + + self.add_slash(difference, slash_era); + changed = true; + + reward + } else if span_record.slashed == slash { + // compute reward. no slash difference to apply. + REWARD_F1 * (self.reward_proportion * slash).saturating_sub(span_record.paid_out) + } else { + Zero::zero() + }; + + if !reward.is_zero() { + changed = true; + span_record.paid_out += reward; + *self.paid_out += reward; + } + + if changed { + self.dirty = true; + SpanSlash::::insert(&span_slash_key, &span_record); + } + + Some(target_span.index) + } +} + +impl<'a, T: 'a + Config> Drop for InspectingSpans<'a, T> { + fn drop(&mut self) { + // only update on disk if we slashed this account. + if !self.dirty { + return + } + + if let Some((start, end)) = self.spans.prune(self.window_start) { + for span_index in start..end { + SpanSlash::::remove(&(self.stash.clone(), span_index)); + } + } + + crate::SlashingSpans::::insert(self.stash, &self.spans); + } +} + +/// Clear slashing metadata for an obsolete era. 
+pub(crate) fn clear_era_metadata(obsolete_era: EraIndex) { + #[allow(deprecated)] + ValidatorSlashInEra::::remove_prefix(&obsolete_era, None); + #[allow(deprecated)] + NominatorSlashInEra::::remove_prefix(&obsolete_era, None); +} + +/// Clear slashing metadata for a dead account. +pub(crate) fn clear_stash_metadata( + stash: &T::AccountId, + num_slashing_spans: u32, +) -> DispatchResult { + let spans = match crate::SlashingSpans::::get(stash) { + None => return Ok(()), + Some(s) => s, + }; + + ensure!( + num_slashing_spans as usize >= spans.iter().count(), + Error::::IncorrectSlashingSpans + ); + + crate::SlashingSpans::::remove(stash); + + // kill slashing-span metadata for account. + // + // this can only happen while the account is staked _if_ they are completely slashed. + // in that case, they may re-bond, but it would count again as span 0. Further ancient + // slashes would slash into this new bond, since metadata has now been cleared. + for span in spans.iter() { + SpanSlash::::remove(&(stash.clone(), span.index)); + } + + Ok(()) +} + +// apply the slash to a stash account, deducting any missing funds from the reward +// payout, saturating at 0. this is mildly unfair but also an edge-case that +// can only occur when overlapping locked funds have been slashed. +pub fn do_slash( + stash: &T::AccountId, + value: BalanceOf, + reward_payout: &mut BalanceOf, + slashed_imbalance: &mut NegativeImbalanceOf, + slash_era: EraIndex, +) { + let mut ledger = + match Pallet::::ledger(sp_staking::StakingAccount::Stash(stash.clone())).defensive() { + Ok(ledger) => ledger, + Err(_) => return, // nothing to do. + }; + + let value = ledger.slash(value, asset::existential_deposit::(), slash_era); + if value.is_zero() { + // nothing to do + return + } + + // Skip slashing for virtual stakers. The pallets managing them should handle the slashing. 
+ if !Pallet::::is_virtual_staker(stash) { + let (imbalance, missing) = asset::slash::(stash, value); + slashed_imbalance.subsume(imbalance); + + if !missing.is_zero() { + // deduct overslash from the reward payout + *reward_payout = reward_payout.saturating_sub(missing); + } + } + + let _ = ledger + .update() + .defensive_proof("ledger fetched from storage so it exists in storage; qed."); + + // trigger the event + >::deposit_event(super::Event::::Slashed { staker: stash.clone(), amount: value }); +} + +/// Apply a previously-unapplied slash. +pub(crate) fn apply_slash(unapplied_slash: UnappliedSlash, slash_era: EraIndex) { + let mut slashed_imbalance = NegativeImbalanceOf::::zero(); + let mut reward_payout = unapplied_slash.payout; + + if unapplied_slash.own > Zero::zero() { + do_slash::( + &unapplied_slash.validator, + unapplied_slash.own, + &mut reward_payout, + &mut slashed_imbalance, + slash_era, + ); + } + + for &(ref nominator, nominator_slash) in &unapplied_slash.others { + if nominator_slash.is_zero() { + continue + } + + do_slash::( + nominator, + nominator_slash, + &mut reward_payout, + &mut slashed_imbalance, + slash_era, + ); + } + + pay_reporters::( + reward_payout, + slashed_imbalance, + &unapplied_slash.reporter.map(|v| crate::vec![v]).unwrap_or_default(), + ); +} + +/// Apply a reward payout to some reporters, paying the rewards out of the slashed imbalance. +fn pay_reporters( + reward_payout: BalanceOf, + slashed_imbalance: NegativeImbalanceOf, + reporters: &[T::AccountId], +) { + if reward_payout.is_zero() || reporters.is_empty() { + // nobody to pay out to or nothing to pay; + // just treat the whole value as slashed. + T::Slash::on_unbalanced(slashed_imbalance); + return + } + + // take rewards out of the slashed imbalance. 
+ let reward_payout = reward_payout.min(slashed_imbalance.peek()); + let (mut reward_payout, mut value_slashed) = slashed_imbalance.split(reward_payout); + + let per_reporter = reward_payout.peek() / (reporters.len() as u32).into(); + for reporter in reporters { + let (reporter_reward, rest) = reward_payout.split(per_reporter); + reward_payout = rest; + + // this cancels out the reporter reward imbalance internally, leading + // to no change in total issuance. + asset::deposit_slashed::(reporter, reporter_reward); + } + + // the rest goes to the on-slash imbalance handler (e.g. treasury) + value_slashed.subsume(reward_payout); // remainder of reward division remains. + T::Slash::on_unbalanced(value_slashed); +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn span_contains_era() { + // unbounded end + let span = SlashingSpan { index: 0, start: 1000, length: None }; + assert!(!span.contains_era(0)); + assert!(!span.contains_era(999)); + + assert!(span.contains_era(1000)); + assert!(span.contains_era(1001)); + assert!(span.contains_era(10000)); + + // bounded end - non-inclusive range. 
+ let span = SlashingSpan { index: 0, start: 1000, length: Some(10) }; + assert!(!span.contains_era(0)); + assert!(!span.contains_era(999)); + + assert!(span.contains_era(1000)); + assert!(span.contains_era(1001)); + assert!(span.contains_era(1009)); + assert!(!span.contains_era(1010)); + assert!(!span.contains_era(1011)); + } + + #[test] + fn single_slashing_span() { + let spans = SlashingSpans { + span_index: 0, + last_start: 1000, + last_nonzero_slash: 0, + prior: Vec::new(), + }; + + assert_eq!( + spans.iter().collect::>(), + vec![SlashingSpan { index: 0, start: 1000, length: None }], + ); + } + + #[test] + fn many_prior_spans() { + let spans = SlashingSpans { + span_index: 10, + last_start: 1000, + last_nonzero_slash: 0, + prior: vec![10, 9, 8, 10], + }; + + assert_eq!( + spans.iter().collect::>(), + vec![ + SlashingSpan { index: 10, start: 1000, length: None }, + SlashingSpan { index: 9, start: 990, length: Some(10) }, + SlashingSpan { index: 8, start: 981, length: Some(9) }, + SlashingSpan { index: 7, start: 973, length: Some(8) }, + SlashingSpan { index: 6, start: 963, length: Some(10) }, + ], + ) + } + + #[test] + fn pruning_spans() { + let mut spans = SlashingSpans { + span_index: 10, + last_start: 1000, + last_nonzero_slash: 0, + prior: vec![10, 9, 8, 10], + }; + + assert_eq!(spans.prune(981), Some((6, 8))); + assert_eq!( + spans.iter().collect::>(), + vec![ + SlashingSpan { index: 10, start: 1000, length: None }, + SlashingSpan { index: 9, start: 990, length: Some(10) }, + SlashingSpan { index: 8, start: 981, length: Some(9) }, + ], + ); + + assert_eq!(spans.prune(982), None); + assert_eq!( + spans.iter().collect::>(), + vec![ + SlashingSpan { index: 10, start: 1000, length: None }, + SlashingSpan { index: 9, start: 990, length: Some(10) }, + SlashingSpan { index: 8, start: 981, length: Some(9) }, + ], + ); + + assert_eq!(spans.prune(989), None); + assert_eq!( + spans.iter().collect::>(), + vec![ + SlashingSpan { index: 10, start: 1000, length: None }, 
+ SlashingSpan { index: 9, start: 990, length: Some(10) }, + SlashingSpan { index: 8, start: 981, length: Some(9) }, + ], + ); + + assert_eq!(spans.prune(1000), Some((8, 10))); + assert_eq!( + spans.iter().collect::>(), + vec![SlashingSpan { index: 10, start: 1000, length: None },], + ); + + assert_eq!(spans.prune(2000), None); + assert_eq!( + spans.iter().collect::>(), + vec![SlashingSpan { index: 10, start: 2000, length: None },], + ); + + // now all in one shot. + let mut spans = SlashingSpans { + span_index: 10, + last_start: 1000, + last_nonzero_slash: 0, + prior: vec![10, 9, 8, 10], + }; + assert_eq!(spans.prune(2000), Some((6, 10))); + assert_eq!( + spans.iter().collect::>(), + vec![SlashingSpan { index: 10, start: 2000, length: None },], + ); + } + + #[test] + fn ending_span() { + let mut spans = SlashingSpans { + span_index: 1, + last_start: 10, + last_nonzero_slash: 0, + prior: Vec::new(), + }; + + assert!(spans.end_span(10)); + + assert_eq!( + spans.iter().collect::>(), + vec![ + SlashingSpan { index: 2, start: 11, length: None }, + SlashingSpan { index: 1, start: 10, length: Some(1) }, + ], + ); + + assert!(spans.end_span(15)); + assert_eq!( + spans.iter().collect::>(), + vec![ + SlashingSpan { index: 3, start: 16, length: None }, + SlashingSpan { index: 2, start: 11, length: Some(5) }, + SlashingSpan { index: 1, start: 10, length: Some(1) }, + ], + ); + + // does nothing if not a valid end. + assert!(!spans.end_span(15)); + assert_eq!( + spans.iter().collect::>(), + vec![ + SlashingSpan { index: 3, start: 16, length: None }, + SlashingSpan { index: 2, start: 11, length: Some(5) }, + SlashingSpan { index: 1, start: 10, length: Some(1) }, + ], + ); + } +} diff --git a/substrate/frame/staking-async/src/testing_utils.rs b/substrate/frame/staking-async/src/testing_utils.rs new file mode 100644 index 0000000000000..a5ef580414aba --- /dev/null +++ b/substrate/frame/staking-async/src/testing_utils.rs @@ -0,0 +1,261 @@ +// This file is part of Substrate. 
+ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Testing utils for staking. Provides some common functions to setup staking state, such as +//! bonding validators, nominators, and generating different types of solutions. + +use crate::{Pallet as Staking, *}; +use frame_benchmarking::account; +use frame_system::RawOrigin; +use rand_chacha::{ + rand_core::{RngCore, SeedableRng}, + ChaChaRng, +}; +use sp_io::hashing::blake2_256; + +use frame_election_provider_support::SortedListProvider; +use frame_support::pallet_prelude::*; +use sp_runtime::{traits::StaticLookup, Perbill}; + +const SEED: u32 = 0; + +/// This function removes all validators and nominators from storage. +pub fn clear_validators_and_nominators() { + #[allow(deprecated)] + Validators::::remove_all(); + + // whenever we touch nominators counter we should update `T::VoterList` as well. + #[allow(deprecated)] + Nominators::::remove_all(); + + // NOTE: safe to call outside block production + T::VoterList::unsafe_clear(); +} + +/// Grab a funded user. +pub fn create_funded_user( + string: &'static str, + n: u32, + balance_factor: u32, +) -> T::AccountId { + let user = account(string, n, SEED); + let balance = asset::existential_deposit::() * balance_factor.into(); + let _ = asset::set_stakeable_balance::(&user, balance); + user +} + +/// Grab a funded user with max Balance. 
+pub fn create_funded_user_with_balance( + string: &'static str, + n: u32, + balance: BalanceOf, +) -> T::AccountId { + let user = account(string, n, SEED); + let _ = asset::set_stakeable_balance::(&user, balance); + user +} + +/// Create a stash and controller pair. +pub fn create_stash_controller( + n: u32, + balance_factor: u32, + destination: RewardDestination, +) -> Result<(T::AccountId, T::AccountId), &'static str> { + let staker = create_funded_user::("stash", n, balance_factor); + let amount = + asset::existential_deposit::().max(1u64.into()) * (balance_factor / 10).max(1).into(); + Staking::::bond(RawOrigin::Signed(staker.clone()).into(), amount, destination)?; + Ok((staker.clone(), staker)) +} + +/// Create a unique stash and controller pair. +pub fn create_unique_stash_controller( + n: u32, + balance_factor: u32, + destination: RewardDestination, + dead_controller: bool, +) -> Result<(T::AccountId, T::AccountId), &'static str> { + let stash = create_funded_user::("stash", n, balance_factor); + + let controller = if dead_controller { + create_funded_user::("controller", n, 0) + } else { + create_funded_user::("controller", n, balance_factor) + }; + let amount = asset::existential_deposit::() * (balance_factor / 10).max(1).into(); + Staking::::bond(RawOrigin::Signed(stash.clone()).into(), amount, destination)?; + + // update ledger to be a *different* controller to stash + if let Some(l) = Ledger::::take(&stash) { + >::insert(&controller, l); + } + // update bonded account to be unique controller + >::insert(&stash, &controller); + + Ok((stash, controller)) +} + +/// Create a stash and controller pair with fixed balance. 
+pub fn create_stash_controller_with_balance( + n: u32, + balance: crate::BalanceOf, + destination: RewardDestination, +) -> Result<(T::AccountId, T::AccountId), &'static str> { + let staker = create_funded_user_with_balance::("stash", n, balance); + Staking::::bond(RawOrigin::Signed(staker.clone()).into(), balance, destination)?; + Ok((staker.clone(), staker)) +} + +/// Create a stash and controller pair, where payouts go to a dead payee account. This is used to +/// test worst case payout scenarios. +pub fn create_stash_and_dead_payee( + n: u32, + balance_factor: u32, +) -> Result<(T::AccountId, T::AccountId), &'static str> { + let staker = create_funded_user::("stash", n, 0); + // payee has no funds + let payee = create_funded_user::("payee", n, 0); + let amount = asset::existential_deposit::() * (balance_factor / 10).max(1).into(); + Staking::::bond( + RawOrigin::Signed(staker.clone()).into(), + amount, + RewardDestination::Account(payee), + )?; + Ok((staker.clone(), staker)) +} + +/// create `max` validators. +pub fn create_validators( + max: u32, + balance_factor: u32, +) -> Result>, &'static str> { + create_validators_with_seed::(max, balance_factor, 0) +} + +/// create `max` validators, with a seed to help unintentional prevent account collisions. 
+pub fn create_validators_with_seed( + max: u32, + balance_factor: u32, + seed: u32, +) -> Result>, &'static str> { + let mut validators: Vec> = Vec::with_capacity(max as usize); + for i in 0..max { + let (stash, controller) = + create_stash_controller::(i + seed, balance_factor, RewardDestination::Staked)?; + let validator_prefs = + ValidatorPrefs { commission: Perbill::from_percent(50), ..Default::default() }; + Staking::::validate(RawOrigin::Signed(controller).into(), validator_prefs)?; + let stash_lookup = T::Lookup::unlookup(stash); + validators.push(stash_lookup); + } + Ok(validators) +} + +/// This function generates validators and nominators who are randomly nominating +/// `edge_per_nominator` random validators (until `to_nominate` if provided). +/// +/// NOTE: This function will remove any existing validators or nominators to ensure +/// we are working with a clean state. +/// +/// Parameters: +/// - `validators`: number of bonded validators +/// - `nominators`: number of bonded nominators. +/// - `edge_per_nominator`: number of edge (vote) per nominator. +/// - `randomize_stake`: whether to randomize the stakes. +/// - `to_nominate`: if `Some(n)`, only the first `n` bonded validator are voted upon. Else, all of +/// them are considered and `edge_per_nominator` random validators are voted for. +/// +/// Return the validators chosen to be nominated. 
+pub fn create_validators_with_nominators_for_era( + validators: u32, + nominators: u32, + edge_per_nominator: usize, + randomize_stake: bool, + to_nominate: Option, +) -> Result>, &'static str> { + clear_validators_and_nominators::(); + + let mut validators_stash: Vec> = Vec::with_capacity(validators as usize); + let mut rng = ChaChaRng::from_seed(SEED.using_encoded(blake2_256)); + + // Create validators + for i in 0..validators { + let balance_factor = if randomize_stake { rng.next_u32() % 255 + 10 } else { 100u32 }; + let (v_stash, v_controller) = + create_stash_controller::(i, balance_factor, RewardDestination::Staked)?; + let validator_prefs = + ValidatorPrefs { commission: Perbill::from_percent(50), ..Default::default() }; + Staking::::validate(RawOrigin::Signed(v_controller.clone()).into(), validator_prefs)?; + let stash_lookup = T::Lookup::unlookup(v_stash.clone()); + validators_stash.push(stash_lookup.clone()); + } + + let to_nominate = to_nominate.unwrap_or(validators_stash.len() as u32) as usize; + let validator_chosen = validators_stash[0..to_nominate].to_vec(); + + // Create nominators + for j in 0..nominators { + let balance_factor = if randomize_stake { rng.next_u32() % 255 + 10 } else { 100u32 }; + let (_n_stash, n_controller) = + create_stash_controller::(u32::MAX - j, balance_factor, RewardDestination::Staked)?; + + // Have them randomly validate + let mut available_validators = validator_chosen.clone(); + let mut selected_validators: Vec> = + Vec::with_capacity(edge_per_nominator); + + for _ in 0..validators.min(edge_per_nominator as u32) { + let selected = rng.next_u32() as usize % available_validators.len(); + let validator = available_validators.remove(selected); + selected_validators.push(validator); + if available_validators.is_empty() { + break + } + } + Staking::::nominate( + RawOrigin::Signed(n_controller.clone()).into(), + selected_validators, + )?; + } + + ValidatorCount::::put(validators); + + Ok(validator_chosen) +} + +/// get the 
current era. +pub fn current_era() -> EraIndex { + CurrentEra::::get().unwrap_or(0) +} + +pub fn migrate_to_old_currency(who: T::AccountId) { + use frame_support::traits::LockableCurrency; + let staked = asset::staked::(&who); + + // apply locks (this also adds a consumer). + T::OldCurrency::set_lock( + STAKING_ID, + &who, + staked, + frame_support::traits::WithdrawReasons::all(), + ); + // remove holds. + asset::kill_stake::(&who).expect("remove hold failed"); + + // replicate old behaviour of explicit increment of consumer. + frame_system::Pallet::::inc_consumers(&who).expect("increment consumer failed"); +} diff --git a/substrate/frame/staking-async/src/tests/bonding.rs b/substrate/frame/staking-async/src/tests/bonding.rs new file mode 100644 index 0000000000000..5d4d4a651d4fd --- /dev/null +++ b/substrate/frame/staking-async/src/tests/bonding.rs @@ -0,0 +1,1991 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::*; +use frame_support::{hypothetically_ok, traits::Currency}; +use sp_staking::{Stake, StakingInterface}; + +#[test] +fn existing_stash_cannot_bond() { + ExtBuilder::default().build_and_execute(|| { + assert!(StakingLedger::::is_bonded(11.into())); + + // cannot bond again. 
+ assert_noop!( + Staking::bond(RuntimeOrigin::signed(11), 7, RewardDestination::Staked), + Error::::AlreadyBonded, + ); + }); +} + +#[test] +fn existing_controller_cannot_bond() { + ExtBuilder::default().build_and_execute(|| { + let (_stash, controller) = testing_utils::create_unique_stash_controller::( + 0, + 7, + RewardDestination::Staked, + false, + ) + .unwrap(); + + assert_noop!( + Staking::bond(RuntimeOrigin::signed(controller), 7, RewardDestination::Staked), + Error::::AlreadyPaired, + ); + }); +} + +#[test] +fn cannot_transfer_staked_balance() { + ExtBuilder::default().nominate(false).build_and_execute(|| { + assert_eq!(asset::staked::(&11), 1000); + // stake + ed + assert_eq!(asset::total_balance::(&11), 1000 + 1); + // nothing more to stake + assert_eq!(asset::stakeable_balance::(&11), 1000); + assert_eq!(asset::free_to_stake::(&11), 0); + + // cannot transfer + assert_noop!( + Balances::transfer_allow_death(RuntimeOrigin::signed(11), 21, 1), + TokenError::Frozen, + ); + + let _ = asset::set_stakeable_balance::(&11, 10000); + + // now it can + assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(11), 21, 1)); + }); +} + +#[test] +fn cannot_reserve_staked_balance() { + ExtBuilder::default().build_and_execute(|| { + assert_eq!(asset::staked::(&11), 1000); + + // Confirm account 11 cannot reserve as a result + assert_noop!( + Balances::reserve(&11, 2), + pallet_balances::Error::::InsufficientBalance + ); + assert_noop!(Balances::reserve(&11, 1), DispatchError::ConsumerRemaining); + + // Give account 11 extra free balance + let _ = asset::set_stakeable_balance::(&11, 1000 + 1000); + assert_eq!(asset::free_to_stake::(&11), 1000); + + // Confirm account 11 can now reserve balance + assert_ok!(Balances::reserve(&11, 500)); + + // free to stake balance has reduced + assert_eq!(asset::free_to_stake::(&11), 500); + }); +} + +#[test] +fn cannot_bond_less_than_ed() { + ExtBuilder::default().existential_deposit(10).build_and_execute(|| { + // given + 
assert_eq!(asset::staked_and_not::(&1), (0, 10)); + + // cannot bond less than existential deposit + assert_noop!( + Staking::bond(RuntimeOrigin::signed(1), 9, RewardDestination::Staked), + Error::::InsufficientBond, + ); + + // can bond existential deposit + assert_ok!(Staking::bond(RuntimeOrigin::signed(1), 10, RewardDestination::Staked)); + assert_eq!(asset::staked_and_not::(&1), (10, 0)); + }); +} + +#[test] +fn do_not_die_when_active_is_ed() { + let ed = 10; + ExtBuilder::default() + .existential_deposit(ed) + .balance_factor(ed) + .build_and_execute(|| { + // given + assert_eq!( + Staking::ledger(21.into()).unwrap(), + StakingLedgerInspect { + stash: 21, + total: 1000 * ed, + active: 1000 * ed, + unlocking: Default::default(), + } + ); + + // when unbond all of it except ed. + assert_ok!(Staking::unbond(RuntimeOrigin::signed(21), 999 * ed)); + + Session::roll_until_active_era(4); + assert_ok!(Staking::withdraw_unbonded(RuntimeOrigin::signed(21), 100)); + + // then + assert_eq!( + Staking::ledger(21.into()).unwrap(), + StakingLedgerInspect { + stash: 21, + total: ed, + active: ed, + unlocking: Default::default(), + } + ); + }) +} + +#[test] +fn bond_truncated_to_maximum_possible() { + ExtBuilder::default().build_and_execute(|| { + // given + assert_eq!(asset::free_to_stake::(&1), 10); + + // then bonding 100 is equal to bonding 10 + assert_ok!(Staking::bond(RuntimeOrigin::signed(1), 100, RewardDestination::Staked)); + assert_eq!(Staking::ledger(1.into()).unwrap().total, 10); + }); +} + +#[test] +fn bond_extra_works() { + ExtBuilder::default().build_and_execute(|| { + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 1000, + unlocking: Default::default(), + } + ); + + // given + asset::set_stakeable_balance::(&11, 1000000); + + // when + assert_ok!(Staking::bond_extra(RuntimeOrigin::signed(11), 100)); + + // then + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 
11, + total: 1000 + 100, + active: 1000 + 100, + unlocking: Default::default(), + } + ); + + // when + assert_ok!(Staking::bond_extra(RuntimeOrigin::signed(11), Balance::max_value())); + + // then + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000000, + active: 1000000, + unlocking: Default::default(), + } + ); + }); +} + +#[test] +fn bond_extra_controller_bad_state_works() { + ExtBuilder::default().try_state(false).build_and_execute(|| { + assert_eq!(StakingLedger::::get(StakingAccount::Stash(31)).unwrap().stash, 31); + + // simulate ledger in bad state: the controller 41 is associated to the stash 31 and 41. + Bonded::::insert(31, 41); + + // we confirm that the ledger is in bad state: 31 has 41 as controller and when fetching + // the ledger associated with the controller 41, its stash is 41 (and not 31). + assert_eq!(Ledger::::get(41).unwrap().stash, 41); + + // if the ledger is in this bad state, the `bond_extra` should fail. + // TODO: remove this BadState, we should no longer have it at all. + assert_noop!(Staking::bond_extra(RuntimeOrigin::signed(31), 10), Error::::BadState); + }) +} + +#[test] +fn bond_extra_updates_exposure_later_if_exposed() { + ExtBuilder::default().nominate(false).build_and_execute(|| { + // given + assert_eq!(active_era(), 1); + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 1000, + unlocking: Default::default(), + } + ); + assert_eq!( + Staking::eras_stakers(active_era(), &11), + Exposure { total: 1000, own: 1000, others: vec![] } + ); + + // when + asset::set_stakeable_balance::(&11, 1000000); + Staking::bond_extra(RuntimeOrigin::signed(11), 100).unwrap(); + + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000 + 100, + active: 1000 + 100, + unlocking: Default::default(), + } + ); + // Exposure is a snapshot! only updated after the next era update. 
+ assert_ne!( + Staking::eras_stakers(active_era(), &11), + Exposure { total: 1000 + 100, own: 1000 + 100, others: vec![] } + ); + + // when trigger next era + Session::roll_until_active_era(2); + + // then + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000 + 100, + active: 1000 + 100, + unlocking: Default::default(), + } + ); + // Exposure is now updated + assert_eq!( + Staking::eras_stakers(active_era(), &11), + Exposure { total: 1000 + 100, own: 1000 + 100, others: vec![] } + ); + }) +} + +#[test] +fn cannot_bond_extra_to_lower_than_ed() { + ExtBuilder::default() + .existential_deposit(11) + .balance_factor(11) + .build_and_execute(|| { + // initial stuff. + assert_eq!( + Staking::ledger(21.into()).unwrap(), + StakingLedgerInspect { + stash: 21, + total: 11 * 1000, + active: 11 * 1000, + unlocking: Default::default(), + } + ); + + // unbond all of it. must be chilled first. + assert_ok!(Staking::chill(RuntimeOrigin::signed(21))); + assert_ok!(Staking::unbond(RuntimeOrigin::signed(21), 11 * 1000)); + + assert_eq!( + Staking::ledger(21.into()).unwrap(), + StakingLedgerInspect { + stash: 21, + total: 11 * 1000, + active: 0, + unlocking: bounded_vec![UnlockChunk { + value: 11 * 1000, + era: active_era() + 3 + }], + } + ); + + // now bond a wee bit more + assert_noop!( + Staking::bond_extra(RuntimeOrigin::signed(21), 5), + Error::::InsufficientBond, + ); + }) +} + +#[test] +fn unbonding_works() { + ExtBuilder::default().build_and_execute(|| { + // given + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 1000, + unlocking: Default::default(), + } + ); + + // when + Staking::unbond(RuntimeOrigin::signed(11), 500).unwrap(); + assert_eq!( + staking_events_since_last_call(), + vec![Event::Unbonded { stash: 11, amount: 500 }] + ); + + // then + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 
500, + unlocking: bounded_vec![UnlockChunk { value: 500, era: active_era() + 3 }], + }, + ); + + // when + assert_ok!(Staking::withdraw_unbonded(RuntimeOrigin::signed(11), 0)); + assert_eq!(staking_events_since_last_call(), vec![]); + + // then + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 500, + unlocking: bounded_vec![UnlockChunk { value: 500, era: active_era() + 3 }], + }, + ); + + // when + Session::roll_until_active_era(2); + assert_ok!(Staking::withdraw_unbonded(RuntimeOrigin::signed(11), 0)); + + // then + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 500, + unlocking: bounded_vec![UnlockChunk { value: 500, era: 1 + 3 }], + }, + ); + + // when + Session::roll_until_active_era(3); + let _ = staking_events_since_last_call(); + assert_ok!(Staking::withdraw_unbonded(RuntimeOrigin::signed(11), 0)); + assert_eq!(staking_events_since_last_call(), vec![]); + + // then + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 500, + unlocking: bounded_vec![UnlockChunk { value: 500, era: 1 + 3 }], + }, + ); + + // when + Session::roll_until_active_era(4); + let _ = staking_events_since_last_call(); + assert_ok!(Staking::withdraw_unbonded(RuntimeOrigin::signed(11), 0)); + assert_eq!( + staking_events_since_last_call(), + vec![Event::Withdrawn { stash: 11, amount: 500 }] + ); + + // then + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { stash: 11, total: 500, active: 500, unlocking: bounded_vec![] }, + ); + }); +} + +#[test] +fn unbonding_multi_chunk() { + ExtBuilder::default().build_and_execute(|| { + // given + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 1000, + unlocking: Default::default(), + } + ); + + // when + Staking::unbond(RuntimeOrigin::signed(11), 500).unwrap(); + 
assert_eq!( + staking_events_since_last_call(), + vec![Event::Unbonded { stash: 11, amount: 500 }] + ); + + // then + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 500, + unlocking: bounded_vec![UnlockChunk { value: 500, era: active_era() + 3 }], + }, + ); + + // when + Session::roll_until_active_era(2); + let _ = staking_events_since_last_call(); + Staking::unbond(RuntimeOrigin::signed(11), 250).unwrap(); + assert_eq!( + staking_events_since_last_call(), + vec![Event::Unbonded { stash: 11, amount: 250 }] + ); + + // then + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 250, + unlocking: bounded_vec![ + UnlockChunk { value: 500, era: 1 + 3 }, + UnlockChunk { value: 250, era: 2 + 3 } + ], + }, + ); + + // when + Session::roll_until_active_era(4); + let _ = staking_events_since_last_call(); + assert_ok!(Staking::withdraw_unbonded(RuntimeOrigin::signed(11), 0)); + assert_eq!( + staking_events_since_last_call(), + vec![Event::Withdrawn { stash: 11, amount: 500 }] + ); + + // then + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 500, + active: 250, + unlocking: bounded_vec![UnlockChunk { value: 250, era: 2 + 3 }], + }, + ); + + // when + Session::roll_until_active_era(5); + let _ = staking_events_since_last_call(); + assert_ok!(Staking::withdraw_unbonded(RuntimeOrigin::signed(11), 0)); + assert_eq!( + staking_events_since_last_call(), + vec![Event::Withdrawn { stash: 11, amount: 250 }] + ); + + // then + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { stash: 11, total: 250, active: 250, unlocking: bounded_vec![] }, + ); + }); +} + +#[test] +fn full_unbonding_works() { + ExtBuilder::default().build_and_execute(|| { + assert_eq!(asset::free_to_stake::(&11), 0); + // cannot fully unbond as they are a validator + assert_noop!( + 
Staking::unbond(RuntimeOrigin::signed(11), 1000), + Error::::InsufficientBond + ); + + // first chill + assert_ok!(Staking::chill(RuntimeOrigin::signed(11))); + + // then fully unbond + assert_ok!(Staking::unbond(RuntimeOrigin::signed(11), 1000)); + assert_eq!( + staking_events_since_last_call(), + vec![Event::Chilled { stash: 11 }, Event::Unbonded { stash: 11, amount: 1000 }] + ); + + // wait 3 eras + Session::roll_until_active_era(active_era() + 3); + let _ = staking_events_since_last_call(); + + // done + assert_ok!(Staking::withdraw_unbonded(RuntimeOrigin::signed(11), 0)); + assert_eq!( + staking_events_since_last_call(), + vec![Event::StakerRemoved { stash: 11 }, Event::Withdrawn { stash: 11, amount: 1000 }] + ); + + // storage is clean, balance is unheld + StakingLedger::::assert_stash_killed(11); + assert_eq!(asset::free_to_stake::(&11), 1000); + }); +} + +#[test] +fn unbonding_merges_if_era_exists() { + ExtBuilder::default().build_and_execute(|| { + // given + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 1000, + unlocking: Default::default(), + } + ); + + // when + Staking::unbond(RuntimeOrigin::signed(11), 500).unwrap(); + + // then + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 500, + unlocking: bounded_vec![UnlockChunk { value: 500, era: 1 + 3 }], + }, + ); + + // when + Staking::unbond(RuntimeOrigin::signed(11), 250).unwrap(); + + // then + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 250, + unlocking: bounded_vec![UnlockChunk { value: 500 + 250, era: 1 + 3 }], + }, + ); + }); +} + +#[test] +fn unbonding_rejects_if_max_chunks() { + ExtBuilder::default() + .max_unlock_chunks(3) + .bonding_duration(7) + .build_and_execute(|| { + // given + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 
1000, + unlocking: Default::default(), + } + ); + + // when + Staking::unbond(RuntimeOrigin::signed(11), 250).unwrap(); + Session::roll_until_active_era(2); + Staking::unbond(RuntimeOrigin::signed(11), 250).unwrap(); + Session::roll_until_active_era(3); + Staking::unbond(RuntimeOrigin::signed(11), 250).unwrap(); + + // then + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 250, + unlocking: bounded_vec![ + UnlockChunk { value: 250, era: 1 + 7 }, + UnlockChunk { value: 250, era: 2 + 7 }, + UnlockChunk { value: 250, era: 3 + 7 }, + ], + }, + ); + + // when + Session::roll_until_active_era(4); + assert_noop!(Staking::unbond(RuntimeOrigin::signed(11), 100), Error::::NoMoreChunks,); + }); +} + +#[test] +fn unbonding_auto_withdraws_if_any() { + ExtBuilder::default().max_unlock_chunks(3).build_and_execute(|| { + // given + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 1000, + unlocking: Default::default(), + } + ); + + // when + Staking::unbond(RuntimeOrigin::signed(11), 250).unwrap(); + Session::roll_until_active_era(2); + Staking::unbond(RuntimeOrigin::signed(11), 250).unwrap(); + Session::roll_until_active_era(3); + Staking::unbond(RuntimeOrigin::signed(11), 250).unwrap(); + + // then + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 250, + unlocking: bounded_vec![ + UnlockChunk { value: 250, era: 1 + 3 }, + UnlockChunk { value: 250, era: 2 + 3 }, + UnlockChunk { value: 250, era: 3 + 3 }, + ], + }, + ); + + // when + Session::roll_until_active_era(4); + // then they can unbond more, as it does auto withdraw of the first chunk + Staking::unbond(RuntimeOrigin::signed(11), 100).unwrap(); + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 750, + active: 150, + unlocking: bounded_vec![ + UnlockChunk { value: 250, era: 2 + 3 }, + 
UnlockChunk { value: 250, era: 3 + 3 }, + UnlockChunk { value: 100, era: 4 + 3 }, + ], + }, + ); + }); +} + +#[test] +fn unbonding_caps_to_ledger_active() { + ExtBuilder::default().set_status(11, StakerStatus::Idle).build_and_execute(|| { + // given + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 1000, + unlocking: Default::default(), + } + ); + + // when + Staking::unbond(RuntimeOrigin::signed(11), 1500).unwrap(); + + // then + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 0, + unlocking: bounded_vec![UnlockChunk { value: 1000, era: 1 + 3 }], + } + ); + }); +} + +#[test] +fn unbond_avoids_dust() { + ExtBuilder::default() + .existential_deposit(5) + .set_status(11, StakerStatus::Idle) + .build_and_execute(|| { + // given + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 1000, + unlocking: Default::default(), + } + ); + + // when + Staking::unbond(RuntimeOrigin::signed(11), 998).unwrap(); + + // then + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 0, + unlocking: bounded_vec![UnlockChunk { value: 1000, era: 1 + 3 }], + } + ); + }); +} + +#[test] +fn unbond_rejects_if_min_role_bond_not_met() { + ExtBuilder::default().min_validator_bond(100).build_and_execute(|| { + // given + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 1000, + unlocking: Default::default(), + } + ); + + // then + assert_noop!(Staking::unbond(RuntimeOrigin::signed(11), 950), Error::::InsufficientBond); + + // can unbond to a value less than 100 remaining + hypothetically_ok!(Staking::unbond(RuntimeOrigin::signed(11), 850)); + + hypothetically!({ + // can also chill and then unbond more. 
+ assert_ok!(Staking::chill(RuntimeOrigin::signed(11))); + assert_ok!(Staking::unbond(RuntimeOrigin::signed(11), 950)); + }) + }) +} + +#[test] +fn reducing_max_unlocking_chunks_abrupt() { + // Concern is on validators only + ExtBuilder::default().build_and_execute(|| { + // given a staker at era=10 and MaxUnlockChunks set to 2 + MaxUnlockingChunks::set(2); + Session::roll_until_active_era(10); + + assert_ok!(Staking::bond(RuntimeOrigin::signed(3), 300, RewardDestination::Staked)); + assert!(matches!(Staking::ledger(3.into()), Ok(_))); + + // when staker unbonds + assert_ok!(Staking::unbond(RuntimeOrigin::signed(3), 20)); + + // then an unlocking chunk is added at `current_era + bonding_duration` + // => 10 + 3 = 13 + let expected_unlocking: BoundedVec, MaxUnlockingChunks> = + bounded_vec![UnlockChunk { value: 20 as Balance, era: 13 as EraIndex }]; + assert!(matches!(Staking::ledger(3.into()), + Ok(StakingLedger { + unlocking, + .. + }) if unlocking == expected_unlocking)); + + // when staker unbonds at next era + Session::roll_until_active_era(11); + + assert_ok!(Staking::unbond(RuntimeOrigin::signed(3), 50)); + + // then another unlock chunk is added + let expected_unlocking: BoundedVec, MaxUnlockingChunks> = + bounded_vec![UnlockChunk { value: 20, era: 13 }, UnlockChunk { value: 50, era: 14 }]; + assert!(matches!(Staking::ledger(3.into()), + Ok(StakingLedger { + unlocking, + .. 
+ }) if unlocking == expected_unlocking)); + + // when staker unbonds further + Session::roll_until_active_era(12); + + // then further unbonding not possible + assert_noop!(Staking::unbond(RuntimeOrigin::signed(3), 20), Error::::NoMoreChunks); + + // when max unlocking chunks is reduced abruptly to a low value + MaxUnlockingChunks::set(1); + + // then unbond, rebond ops are blocked with ledger in corrupt state + assert_noop!(Staking::unbond(RuntimeOrigin::signed(3), 20), Error::::NotController); + assert_noop!(Staking::rebond(RuntimeOrigin::signed(3), 100), Error::::NotController); + + // reset the ledger corruption + MaxUnlockingChunks::set(2); + + // now rebond works again + assert_ok!(Staking::rebond(RuntimeOrigin::signed(3), 20)); + }) +} + +#[test] +fn switching_roles() { + // Test that it should be possible to switch between roles (nominator, validator, idle) + ExtBuilder::default().nominate(false).build_and_execute(|| { + // Reset reward destination + for i in &[11, 21] { + assert_ok!(Staking::set_payee(RuntimeOrigin::signed(*i), RewardDestination::Stash)); + } + + assert_eq_uvec!(Session::validators(), vec![21, 11]); + + // put some money in account that we'll use. 
+ for i in 1..7 { + let _ = Balances::deposit_creating(&i, 5000); + } + + // add 2 nominators + assert_ok!(Staking::bond(RuntimeOrigin::signed(1), 2000, RewardDestination::Account(1))); + assert_ok!(Staking::nominate(RuntimeOrigin::signed(1), vec![11, 5])); + + assert_ok!(Staking::bond(RuntimeOrigin::signed(3), 500, RewardDestination::Account(3))); + assert_ok!(Staking::nominate(RuntimeOrigin::signed(3), vec![21, 1])); + + // add a new validator candidate + assert_ok!(Staking::bond(RuntimeOrigin::signed(5), 1000, RewardDestination::Account(5))); + assert_ok!(Staking::validate(RuntimeOrigin::signed(5), ValidatorPrefs::default())); + + Session::roll_until_active_era(2); + + // with current nominators 11 and 5 have the most stake + assert_eq_uvec!(Session::validators(), vec![5, 11]); + + // 2 decides to be a validator. Consequences: + assert_ok!(Staking::validate(RuntimeOrigin::signed(1), ValidatorPrefs::default())); + // new stakes: + // 11: 1000 self vote + // 21: 1000 self vote + 250 vote + // 5 : 1000 self vote + // 1 : 2000 self vote + 250 vote. + // Winners: 21 and 1 + + Session::roll_until_active_era(3); + + assert_eq_uvec!(Session::validators(), vec![1, 21]); + }); +} + +#[test] +fn bond_with_no_staked_value() { + // Behavior when someone bonds with no staked value. + // Particularly when they votes and the candidate is elected. + ExtBuilder::default() + .validator_count(3) + .existential_deposit(5) + .balance_factor(5) + .nominate(false) + .build_and_execute(|| { + // Can't bond with 1 + assert_noop!( + Staking::bond(RuntimeOrigin::signed(1), 1, RewardDestination::Account(1)), + Error::::InsufficientBond, + ); + // bonded with absolute minimum value possible. + assert_ok!(Staking::bond(RuntimeOrigin::signed(1), 5, RewardDestination::Account(1))); + assert_eq!(pallet_balances::Holds::::get(&1)[0].amount, 5); + + // unbonding even 1 will cause all to be unbonded. 
+ assert_ok!(Staking::unbond(RuntimeOrigin::signed(1), 1)); + assert_eq!( + Staking::ledger(1.into()).unwrap(), + StakingLedgerInspect { + stash: 1, + active: 0, + total: 5, + unlocking: bounded_vec![UnlockChunk { value: 5, era: 4 }], + } + ); + + Session::roll_until_active_era(2); + Session::roll_until_active_era(3); + + // not yet removed. + assert_ok!(Staking::withdraw_unbonded(RuntimeOrigin::signed(1), 0)); + assert!(Staking::ledger(1.into()).is_ok()); + assert_eq!(pallet_balances::Holds::::get(&1)[0].amount, 5); + + Session::roll_until_active_era(4); + + // poof. Account 1 is removed from the staking system. + assert_ok!(Staking::withdraw_unbonded(RuntimeOrigin::signed(1), 0)); + assert!(Staking::ledger(1.into()).is_err()); + assert_eq!(pallet_balances::Holds::::get(&1).len(), 0); + }); +} + +#[test] +fn bond_with_little_staked_value_bounded() { + ExtBuilder::default().validator_count(3).nominate(false).build_and_execute(|| { + // setup + assert_ok!(Staking::chill(RuntimeOrigin::signed(31))); + assert_ok!(Staking::set_payee(RuntimeOrigin::signed(11), RewardDestination::Stash)); + + // Stingy validator. + assert_ok!(Staking::bond(RuntimeOrigin::signed(1), 1, RewardDestination::Account(1))); + assert_ok!(Staking::validate(RuntimeOrigin::signed(1), ValidatorPrefs::default())); + + reward_all_elected(); + Session::roll_until_active_era(2); + let _ = staking_events_since_last_call(); + mock::make_all_reward_payment(1); + + // 1 is elected. + assert_eq_uvec!(session_validators(), vec![21, 11, 1]); + + // Old ones are rewarded. 
+ assert_eq!( + staking_events_since_last_call(), + vec![ + Event::PayoutStarted { era_index: 1, validator_stash: 11, page: 0, next: None }, + Event::Rewarded { stash: 11, dest: RewardDestination::Stash, amount: 2500 }, + Event::PayoutStarted { era_index: 1, validator_stash: 21, page: 0, next: None }, + Event::Rewarded { stash: 21, dest: RewardDestination::Staked, amount: 2500 }, + Event::PayoutStarted { era_index: 1, validator_stash: 31, page: 0, next: None }, + Event::Rewarded { stash: 31, dest: RewardDestination::Staked, amount: 2500 } + ] + ); + + // reward era 2 + reward_all_elected(); + Session::roll_until_active_era(3); + let _ = staking_events_since_last_call(); + mock::make_all_reward_payment(2); + + // 1 is also rewarded + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::PayoutStarted { era_index: 2, validator_stash: 1, page: 0, next: None }, + Event::Rewarded { stash: 1, dest: RewardDestination::Account(1), amount: 2500 }, + Event::PayoutStarted { era_index: 2, validator_stash: 11, page: 0, next: None }, + Event::Rewarded { stash: 11, dest: RewardDestination::Stash, amount: 2500 }, + Event::PayoutStarted { era_index: 2, validator_stash: 21, page: 0, next: None }, + Event::Rewarded { stash: 21, dest: RewardDestination::Staked, amount: 2500 } + ] + ); + + assert_eq_uvec!(session_validators(), vec![21, 11, 1]); + assert_eq!(Staking::eras_stakers(active_era(), &1).total, 1); + }); +} + +#[test] +fn restricted_accounts_can_only_withdraw() { + ExtBuilder::default().build_and_execute(|| { + // alice is a non blacklisted account. 
+ let alice = 301; + let _ = Balances::make_free_balance_be(&alice, 500); + // alice can bond + assert_ok!(Staking::bond(RuntimeOrigin::signed(alice), 100, RewardDestination::Staked)); + + // and bob is a blacklisted account + let bob = 302; + let _ = Balances::make_free_balance_be(&bob, 500); + restrict(&bob); + + // Bob cannot bond + assert_noop!( + Staking::bond(RuntimeOrigin::signed(bob), 100, RewardDestination::Staked,), + Error::::Restricted + ); + + // alice is blacklisted now and cannot bond anymore + restrict(&alice); + + assert_noop!( + Staking::bond_extra(RuntimeOrigin::signed(alice), 100), + Error::::Restricted + ); + + // but she can unbond her existing bond + assert_ok!(Staking::unbond(RuntimeOrigin::signed(alice), 100)); + + // she cannot rebond the unbonded amount + Session::roll_until_active_era(2); + assert_noop!(Staking::rebond(RuntimeOrigin::signed(alice), 50), Error::::Restricted); + + // move to era when alice fund can be withdrawn + Session::roll_until_active_era(5); + + // alice can withdraw now + assert_ok!(Staking::withdraw_unbonded(RuntimeOrigin::signed(alice), 0)); + + // she still cannot bond + assert_noop!( + Staking::bond(RuntimeOrigin::signed(alice), 100, RewardDestination::Staked,), + Error::::Restricted + ); + + // bob is removed from restrict list + remove_from_restrict_list(&bob); + + // bob can bond now + assert_ok!(Staking::bond(RuntimeOrigin::signed(bob), 100, RewardDestination::Staked)); + + // and bond extra + assert_ok!(Staking::bond_extra(RuntimeOrigin::signed(bob), 100)); + + Session::roll_until_active_era(6); + + // unbond also works. + assert_ok!(Staking::unbond(RuntimeOrigin::signed(bob), 100)); + + // bob can withdraw as well. + Session::roll_until_active_era(6); + assert_ok!(Staking::withdraw_unbonded(RuntimeOrigin::signed(bob), 0)); + }) +} + +#[test] +fn permissionless_withdraw_overstake() { + ExtBuilder::default().build_and_execute(|| { + // Given Alice, Bob and Charlie with some stake. 
+ let alice = 301; + let bob = 302; + let charlie = 303; + let _ = Balances::make_free_balance_be(&alice, 500); + let _ = Balances::make_free_balance_be(&bob, 500); + let _ = Balances::make_free_balance_be(&charlie, 500); + assert_ok!(Staking::bond(RuntimeOrigin::signed(alice), 100, RewardDestination::Staked)); + assert_ok!(Staking::bond(RuntimeOrigin::signed(bob), 100, RewardDestination::Staked)); + assert_ok!(Staking::bond(RuntimeOrigin::signed(charlie), 100, RewardDestination::Staked)); + + // WHEN: charlie is partially unbonding. + assert_ok!(Staking::unbond(RuntimeOrigin::signed(charlie), 90)); + let charlie_ledger = StakingLedger::::get(StakingAccount::Stash(charlie)).unwrap(); + + // AND: alice and charlie ledger having higher value than actual stake. + Ledger::::insert(alice, StakingLedger::::new(alice, 200)); + Ledger::::insert( + charlie, + StakingLedger { stash: charlie, total: 200, active: 200 - 90, ..charlie_ledger }, + ); + + // THEN overstake can be permissionlessly withdrawn. + let _ = staking_events_since_last_call(); + + // Alice stake is corrected. + assert_eq!( + ::stake(&alice).unwrap(), + Stake { total: 200, active: 200 } + ); + assert_ok!(Staking::withdraw_overstake(RuntimeOrigin::signed(1), alice)); + assert_eq!( + ::stake(&alice).unwrap(), + Stake { total: 100, active: 100 } + ); + + // Charlie who is partially withdrawing also gets their stake corrected. + assert_eq!( + ::stake(&charlie).unwrap(), + Stake { total: 200, active: 110 } + ); + assert_ok!(Staking::withdraw_overstake(RuntimeOrigin::signed(1), charlie)); + assert_eq!( + ::stake(&charlie).unwrap(), + Stake { total: 200 - 100, active: 110 - 100 } + ); + + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::Withdrawn { stash: alice, amount: 200 - 100 }, + Event::Withdrawn { stash: charlie, amount: 200 - 100 } + ] + ); + + // but Bob ledger is fine and that cannot be withdrawn. 
+ assert_noop!( + Staking::withdraw_overstake(RuntimeOrigin::signed(1), bob), + Error::::BoundNotMet + ); + }); +} + +mod rebobd { + use super::*; + + #[test] + fn rebond_works() { + ExtBuilder::default().nominate(false).build_and_execute(|| { + // given + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 1000, + unlocking: Default::default(), + } + ); + + // nothing to rebond + assert_noop!( + Staking::rebond(RuntimeOrigin::signed(11), 500), + Error::::NoUnlockChunk + ); + + // given + Staking::unbond(RuntimeOrigin::signed(11), 900).unwrap(); + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 100, + unlocking: bounded_vec![UnlockChunk { value: 900, era: 1 + 3 }], + } + ); + + // then rebond all the funds unbonded. + Staking::rebond(RuntimeOrigin::signed(11), 900).unwrap(); + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 1000, + unlocking: Default::default(), + } + ); + + // Unbond almost all of the funds in stash. + Staking::unbond(RuntimeOrigin::signed(11), 900).unwrap(); + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 100, + unlocking: bounded_vec![UnlockChunk { value: 900, era: 1 + 3 }], + } + ); + + // Re-bond part of the funds unbonded. + Staking::rebond(RuntimeOrigin::signed(11), 500).unwrap(); + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 600, + unlocking: bounded_vec![UnlockChunk { value: 400, era: 1 + 3 }], + } + ); + + // Re-bond the remainder of the funds unbonded. 
+ Staking::rebond(RuntimeOrigin::signed(11), 500).unwrap(); + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 1000, + unlocking: Default::default(), + } + ); + + // Unbond parts of the funds in stash. + Staking::unbond(RuntimeOrigin::signed(11), 300).unwrap(); + Staking::unbond(RuntimeOrigin::signed(11), 300).unwrap(); + Staking::unbond(RuntimeOrigin::signed(11), 300).unwrap(); + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 100, + unlocking: bounded_vec![UnlockChunk { value: 900, era: 1 + 3 }], + } + ); + + // Re-bond part of the funds unbonded. + Staking::rebond(RuntimeOrigin::signed(11), 500).unwrap(); + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 600, + unlocking: bounded_vec![UnlockChunk { value: 400, era: 1 + 3 }], + } + ); + }) + } + + #[test] + fn rebond_is_fifo() { + ExtBuilder::default().build_and_execute(|| { + // given + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 1000, + unlocking: Default::default(), + } + ); + + // Unbond some of the funds in stash. + Staking::unbond(RuntimeOrigin::signed(11), 400).unwrap(); + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 600, + unlocking: bounded_vec![UnlockChunk { value: 400, era: 1 + 3 }], + } + ); + + Session::roll_until_active_era(2); + + // Unbond more of the funds in stash. + Staking::unbond(RuntimeOrigin::signed(11), 300).unwrap(); + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 300, + unlocking: bounded_vec![ + UnlockChunk { value: 400, era: 1 + 3 }, + UnlockChunk { value: 300, era: 2 + 3 }, + ], + } + ); + + Session::roll_until_active_era(3); + + // Unbond yet more of the funds in stash. 
+ Staking::unbond(RuntimeOrigin::signed(11), 200).unwrap(); + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 100, + unlocking: bounded_vec![ + UnlockChunk { value: 400, era: 1 + 3 }, + UnlockChunk { value: 300, era: 2 + 3 }, + UnlockChunk { value: 200, era: 3 + 3 }, + ], + } + ); + + // Re-bond half of the unbonding funds. + Staking::rebond(RuntimeOrigin::signed(11), 400).unwrap(); + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 500, + unlocking: bounded_vec![ + UnlockChunk { value: 400, era: 1 + 3 }, + UnlockChunk { value: 100, era: 2 + 3 }, + ], + } + ); + }) + } + + #[test] + fn rebond_emits_right_value_in_event() { + // When a user calls rebond with more than can be rebonded, things succeed, + // and the rebond event emits the actual value rebonded. + ExtBuilder::default().nominate(false).build_and_execute(|| { + // Give account 11 some large free balance greater than total + let _ = asset::set_stakeable_balance::(&11, 1000000); + + // Unbond almost all of the funds in stash. 
+ Staking::unbond(RuntimeOrigin::signed(11), 900).unwrap(); + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 100, + unlocking: bounded_vec![UnlockChunk { value: 900, era: 1 + 3 }], + } + ); + assert_eq!( + staking_events_since_last_call(), + vec![Event::Unbonded { stash: 11, amount: 900 }] + ); + + // Re-bond less than the total + Staking::rebond(RuntimeOrigin::signed(11), 100).unwrap(); + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 200, + unlocking: bounded_vec![UnlockChunk { value: 800, era: 1 + 3 }], + } + ); + assert_eq!( + staking_events_since_last_call(), + vec![Event::Bonded { stash: 11, amount: 100 }] + ); + + // Re-bond way more than available + Staking::rebond(RuntimeOrigin::signed(11), 100_000).unwrap(); + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 1000, + unlocking: Default::default(), + } + ); + assert_eq!( + staking_events_since_last_call(), + vec![Event::Bonded { stash: 11, amount: 800 }] + ); + }); + } + + #[test] + fn cannot_rebond_to_lower_than_ed() { + ExtBuilder::default() + .existential_deposit(11) + .balance_factor(11) + .build_and_execute(|| { + // initial stuff. + assert_eq!( + Staking::ledger(21.into()).unwrap(), + StakingLedgerInspect { + stash: 21, + total: 11 * 1000, + active: 11 * 1000, + unlocking: Default::default(), + } + ); + + // unbond all of it. must be chilled first. 
+ assert_ok!(Staking::chill(RuntimeOrigin::signed(21))); + assert_ok!(Staking::unbond(RuntimeOrigin::signed(21), 11 * 1000)); + + assert_eq!( + Staking::ledger(21.into()).unwrap(), + StakingLedgerInspect { + stash: 21, + total: 11 * 1000, + active: 0, + unlocking: bounded_vec![UnlockChunk { value: 11 * 1000, era: 4 }], + } + ); + + // now bond a wee bit more + assert_noop!( + Staking::rebond(RuntimeOrigin::signed(21), 5), + Error::::InsufficientBond + ); + }) + } +} + +mod reap { + use super::*; + + #[test] + fn reap_stash_works() { + ExtBuilder::default() + .existential_deposit(10) + .balance_factor(10) + .build_and_execute(|| { + // given + assert_eq!(asset::staked::(&11), 10 * 1000); + assert_eq!(Staking::bonded(&11), Some(11)); + + assert!(>::contains_key(&11)); + assert!(>::contains_key(&11)); + assert!(>::contains_key(&11)); + assert!(>::contains_key(&11)); + + // stash is not reapable + assert_noop!( + Staking::reap_stash(RuntimeOrigin::signed(20), 11, 0), + Error::::FundedTarget + ); + + // no easy way to cause an account to go below ED, we tweak their staking ledger + // instead. + Ledger::::insert(11, StakingLedger::::new(11, 5)); + + // reap-able + assert_ok!(Staking::reap_stash(RuntimeOrigin::signed(20), 11, 0)); + + // then + assert!(!>::contains_key(&11)); + assert!(!>::contains_key(&11)); + assert!(!>::contains_key(&11)); + assert!(!>::contains_key(&11)); + // lock is removed. 
+ assert_eq!(asset::staked::(&11), 0); + }); + } + + #[test] + fn reap_stash_works_with_existential_deposit_zero() { + ExtBuilder::default() + .existential_deposit(0) + .balance_factor(10) + .build_and_execute(|| { + // given + assert_eq!(asset::staked::(&11), 10 * 1000); + assert_eq!(Staking::bonded(&11), Some(11)); + + assert!(>::contains_key(&11)); + assert!(>::contains_key(&11)); + assert!(>::contains_key(&11)); + assert!(>::contains_key(&11)); + + // stash is not reapable + assert_noop!( + Staking::reap_stash(RuntimeOrigin::signed(20), 11, 0), + Error::::FundedTarget + ); + + // no easy way to cause an account to go below ED, we tweak their staking ledger + // instead. + Ledger::::insert(11, StakingLedger::::new(11, 0)); + + // reap-able + assert_ok!(Staking::reap_stash(RuntimeOrigin::signed(20), 11, 0)); + + // then + assert!(!>::contains_key(&11)); + assert!(!>::contains_key(&11)); + assert!(!>::contains_key(&11)); + assert!(!>::contains_key(&11)); + // lock is removed. + assert_eq!(asset::staked::(&11), 0); + }); + } +} + +mod nominate { + use super::*; + #[test] + fn duplicate_nominations_stripped() { + ExtBuilder::default().nominate(false).set_stake(31, 1000).build_and_execute(|| { + // ensure all have equal stake. + assert_eq!( + >::iter() + .map(|(v, _)| (v, Staking::ledger(v.into()).unwrap().total)) + .collect::>(), + vec![(31, 1000), (21, 1000), (11, 1000)], + ); + + // no nominators shall exist. + assert!(>::iter().map(|(n, _)| n).collect::>().is_empty()); + + bond_nominator(1, 1000, vec![11, 11, 11, 21, 31]); + assert_eq!( + Nominators::::get(1).unwrap(), + Nominations { + targets: bounded_vec![11, 21, 31], + submitted_in: 1, + suppressed: false + } + ); + }); + } + + #[test] + fn nominating_non_validators_is_ok() { + ExtBuilder::default().nominate(false).set_stake(31, 1000).build_and_execute(|| { + // ensure all have equal stake. 
+ assert_eq!( + >::iter() + .map(|(v, _)| (v, Staking::ledger(v.into()).unwrap().total)) + .collect::>(), + vec![(31, 1000), (21, 1000), (11, 1000)], + ); + + // no nominators shall exist. + assert!(>::iter().map(|(n, _)| n).collect::>().is_empty()); + + bond_nominator(1, 1000, vec![11, 21, 31, 41]); + assert_eq!( + Nominators::::get(1).unwrap(), + Nominations { + targets: bounded_vec![11, 21, 31, 41], + submitted_in: 1, + suppressed: false + } + ); + }); + } + + #[test] + fn blocking_and_kicking_works() { + ExtBuilder::default().validator_count(4).nominate(true).build_and_execute(|| { + // given + assert_ok!(Staking::validate( + RuntimeOrigin::signed(11), + ValidatorPrefs { blocked: true, ..Default::default() } + )); + + // attempt to nominate from 101 + assert_ok!(Staking::nominate(RuntimeOrigin::signed(101), vec![11])); + + // should have worked since we're already nominated them + assert_eq!(Nominators::::get(&101).unwrap().targets, vec![11]); + + // kick the nominator + assert_ok!(Staking::kick(RuntimeOrigin::signed(11), vec![101])); + + // should have been kicked now + assert!(Nominators::::get(&101).unwrap().targets.is_empty()); + + // attempt to nominate from 100/101... 
+ assert_noop!( + Staking::nominate(RuntimeOrigin::signed(101), vec![11]), + Error::::BadTarget + ); + }); + } +} + +mod staking_bounds_chill_other { + use super::*; + + #[test] + fn min_bond_checks_work() { + ExtBuilder::default() + .existential_deposit(100) + .balance_factor(100) + .min_nominator_bond(1_000) + .min_validator_bond(1_500) + .build_and_execute(|| { + // 500 is not enough for any role + assert_ok!(Staking::bond(RuntimeOrigin::signed(3), 500, RewardDestination::Stash)); + assert_noop!( + Staking::nominate(RuntimeOrigin::signed(3), vec![1]), + Error::::InsufficientBond + ); + assert_noop!( + Staking::validate(RuntimeOrigin::signed(3), ValidatorPrefs::default()), + Error::::InsufficientBond, + ); + + // 1000 is enough for nominator + assert_ok!(Staking::bond_extra(RuntimeOrigin::signed(3), 500)); + assert_ok!(Staking::nominate(RuntimeOrigin::signed(3), vec![1])); + assert_noop!( + Staking::validate(RuntimeOrigin::signed(3), ValidatorPrefs::default()), + Error::::InsufficientBond, + ); + + // 1500 is enough for validator + assert_ok!(Staking::bond_extra(RuntimeOrigin::signed(3), 500)); + assert_ok!(Staking::nominate(RuntimeOrigin::signed(3), vec![1])); + assert_ok!(Staking::validate(RuntimeOrigin::signed(3), ValidatorPrefs::default())); + + // Can't unbond anything as validator + assert_noop!( + Staking::unbond(RuntimeOrigin::signed(3), 500), + Error::::InsufficientBond + ); + + // Once they are a nominator, they can unbond 500 + assert_ok!(Staking::nominate(RuntimeOrigin::signed(3), vec![1])); + assert_ok!(Staking::unbond(RuntimeOrigin::signed(3), 500)); + assert_noop!( + Staking::unbond(RuntimeOrigin::signed(3), 500), + Error::::InsufficientBond + ); + + // Once they are chilled they can unbond everything + assert_ok!(Staking::chill(RuntimeOrigin::signed(3))); + assert_ok!(Staking::unbond(RuntimeOrigin::signed(3), 1000)); + }) + } + + #[test] + fn chill_other_works() { + ExtBuilder::default() + .existential_deposit(100) + .balance_factor(100) + 
.min_nominator_bond(1_000) + .min_validator_bond(1_500) + .build_and_execute(|| { + let initial_validators = Validators::::count(); + let initial_nominators = Nominators::::count(); + for i in 0..15 { + let a = 4 * i; + let b = 4 * i + 2; + asset::set_stakeable_balance::(&a, 100_000); + asset::set_stakeable_balance::(&b, 100_000); + + // Nominator + assert_ok!(Staking::bond( + RuntimeOrigin::signed(a), + 1000, + RewardDestination::Stash + )); + assert_ok!(Staking::nominate(RuntimeOrigin::signed(a), vec![1])); + + // Validator + assert_ok!(Staking::bond( + RuntimeOrigin::signed(b), + 1500, + RewardDestination::Stash + )); + assert_ok!(Staking::validate( + RuntimeOrigin::signed(b), + ValidatorPrefs::default() + )); + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::Bonded { stash: a, amount: 1000 }, + Event::Bonded { stash: b, amount: 1500 }, + Event::ValidatorPrefsSet { + stash: b, + prefs: ValidatorPrefs { commission: Zero::zero(), blocked: false } + } + ] + ); + } + + // To chill other users, we need to: + // * Set a minimum bond amount + // * Set a limit + // * Set a threshold + // + // If any of these are missing, we do not have enough information to allow the + // `chill_other` to succeed from one user to another. + // + // Out of 8 possible cases, only one will allow the use of `chill_other`, which is + // when all 3 conditions are met. + + // 1. No limits whatsoever + assert_ok!(Staking::set_staking_configs( + RuntimeOrigin::root(), + ConfigOp::Remove, + ConfigOp::Remove, + ConfigOp::Remove, + ConfigOp::Remove, + ConfigOp::Remove, + ConfigOp::Remove, + ConfigOp::Remove, + )); + + // Can't chill these users + assert_noop!( + Staking::chill_other(RuntimeOrigin::signed(1337), 0), + Error::::CannotChillOther + ); + assert_noop!( + Staking::chill_other(RuntimeOrigin::signed(1337), 2), + Error::::CannotChillOther + ); + + // 2. Change only the minimum bonds. 
+ assert_ok!(Staking::set_staking_configs( + RuntimeOrigin::root(), + ConfigOp::Set(1_500), + ConfigOp::Set(2_000), + ConfigOp::Noop, + ConfigOp::Noop, + ConfigOp::Noop, + ConfigOp::Noop, + ConfigOp::Noop, + )); + + // Still can't chill these users + assert_noop!( + Staking::chill_other(RuntimeOrigin::signed(1337), 0), + Error::::CannotChillOther + ); + assert_noop!( + Staking::chill_other(RuntimeOrigin::signed(1337), 2), + Error::::CannotChillOther + ); + + // 3. Add nominator/validator count limits, but no other threshold. + assert_ok!(Staking::set_staking_configs( + RuntimeOrigin::root(), + ConfigOp::Remove, + ConfigOp::Remove, + ConfigOp::Set(10), + ConfigOp::Set(10), + ConfigOp::Noop, + ConfigOp::Noop, + ConfigOp::Noop, + )); + + // Still can't chill these users + assert_noop!( + Staking::chill_other(RuntimeOrigin::signed(1337), 0), + Error::::CannotChillOther + ); + assert_noop!( + Staking::chill_other(RuntimeOrigin::signed(1337), 2), + Error::::CannotChillOther + ); + + // 4. Add chill threshold, but no other limits + assert_ok!(Staking::set_staking_configs( + RuntimeOrigin::root(), + ConfigOp::Noop, + ConfigOp::Noop, + ConfigOp::Remove, + ConfigOp::Remove, + ConfigOp::Set(Percent::from_percent(75)), + ConfigOp::Noop, + ConfigOp::Noop, + )); + + // Still can't chill these users + assert_noop!( + Staking::chill_other(RuntimeOrigin::signed(1337), 0), + Error::::CannotChillOther + ); + assert_noop!( + Staking::chill_other(RuntimeOrigin::signed(1337), 2), + Error::::CannotChillOther + ); + + // 5. 
Add bond and count limits, but no threshold + assert_ok!(Staking::set_staking_configs( + RuntimeOrigin::root(), + ConfigOp::Set(1_500), + ConfigOp::Set(2_000), + ConfigOp::Set(10), + ConfigOp::Set(10), + ConfigOp::Remove, + ConfigOp::Remove, + ConfigOp::Remove, + )); + + // Still can't chill these users + assert_noop!( + Staking::chill_other(RuntimeOrigin::signed(1337), 0), + Error::::CannotChillOther + ); + assert_noop!( + Staking::chill_other(RuntimeOrigin::signed(1337), 2), + Error::::CannotChillOther + ); + + // 6. Add bond and threshold limits, but no count limits + assert_ok!(Staking::set_staking_configs( + RuntimeOrigin::root(), + ConfigOp::Noop, + ConfigOp::Noop, + ConfigOp::Remove, + ConfigOp::Remove, + ConfigOp::Set(Percent::from_percent(75)), + ConfigOp::Noop, + ConfigOp::Noop, + )); + + // Still can't chill these users + assert_noop!( + Staking::chill_other(RuntimeOrigin::signed(1337), 0), + Error::::CannotChillOther + ); + assert_noop!( + Staking::chill_other(RuntimeOrigin::signed(1337), 2), + Error::::CannotChillOther + ); + + // 7. Add count limits and a chill threshold, but no bond limits + assert_ok!(Staking::set_staking_configs( + RuntimeOrigin::root(), + ConfigOp::Remove, + ConfigOp::Remove, + ConfigOp::Set(10), + ConfigOp::Set(10), + ConfigOp::Set(Percent::from_percent(75)), + ConfigOp::Noop, + ConfigOp::Noop, + )); + + // Still can't chill these users + assert_noop!( + Staking::chill_other(RuntimeOrigin::signed(1337), 0), + Error::::CannotChillOther + ); + assert_noop!( + Staking::chill_other(RuntimeOrigin::signed(1337), 2), + Error::::CannotChillOther + ); + + // 8. 
Add all limits + assert_ok!(Staking::set_staking_configs( + RuntimeOrigin::root(), + ConfigOp::Set(1_500), + ConfigOp::Set(2_000), + ConfigOp::Set(10), + ConfigOp::Set(10), + ConfigOp::Set(Percent::from_percent(75)), + ConfigOp::Noop, + ConfigOp::Noop, + )); + + // 16 people total because tests start with 2 active one + assert_eq!(Nominators::::count(), 15 + initial_nominators); + assert_eq!(Validators::::count(), 15 + initial_validators); + + // Users can now be chilled down to 7 people, so we try to remove 9 of them + // (starting with 16) + for i in 6..15 { + let b = 4 * i; + let d = 4 * i + 2; + assert_ok!(Staking::chill_other(RuntimeOrigin::signed(1337), b)); + assert_eq!(*staking_events().last().unwrap(), Event::Chilled { stash: b }); + assert_ok!(Staking::chill_other(RuntimeOrigin::signed(1337), d)); + assert_eq!(*staking_events().last().unwrap(), Event::Chilled { stash: d }); + } + + // chill a nominator. Limit is not reached, not chill-able + assert_eq!(Nominators::::count(), 7); + assert_noop!( + Staking::chill_other(RuntimeOrigin::signed(1337), 0), + Error::::CannotChillOther + ); + // chill a validator. Limit is reached, chill-able. 
+ assert_eq!(Validators::::count(), 9); + assert_ok!(Staking::chill_other(RuntimeOrigin::signed(1337), 2)); + }) + } + + #[test] + fn capped_stakers_works() { + ExtBuilder::default().build_and_execute(|| { + let validator_count = Validators::::count(); + assert_eq!(validator_count, 3); + let nominator_count = Nominators::::count(); + assert_eq!(nominator_count, 1); + + // Change the maximums + let max = 10; + assert_ok!(Staking::set_staking_configs( + RuntimeOrigin::root(), + ConfigOp::Set(10), + ConfigOp::Set(10), + ConfigOp::Set(max), + ConfigOp::Set(max), + ConfigOp::Remove, + ConfigOp::Remove, + ConfigOp::Noop, + )); + + // can create `max - validator_count` validators + let mut some_existing_validator = AccountId::default(); + for i in 0..max - validator_count { + let (_, controller) = testing_utils::create_stash_controller::( + i + 10_000_000, + 100, + RewardDestination::Stash, + ) + .unwrap(); + assert_ok!(Staking::validate( + RuntimeOrigin::signed(controller), + ValidatorPrefs::default() + )); + some_existing_validator = controller; + } + + // but no more + let (_, last_validator) = + testing_utils::create_stash_controller::(1337, 100, RewardDestination::Stash) + .unwrap(); + + assert_noop!( + Staking::validate(RuntimeOrigin::signed(last_validator), ValidatorPrefs::default()), + Error::::TooManyValidators, + ); + + // same with nominators + let mut some_existing_nominator = AccountId::default(); + for i in 0..max - nominator_count { + let (_, controller) = testing_utils::create_stash_controller::( + i + 20_000_000, + 100, + RewardDestination::Stash, + ) + .unwrap(); + assert_ok!(Staking::nominate(RuntimeOrigin::signed(controller), vec![1])); + some_existing_nominator = controller; + } + + // one more is too many. 
+ let (_, last_nominator) = testing_utils::create_stash_controller::( + 30_000_000, + 100, + RewardDestination::Stash, + ) + .unwrap(); + assert_noop!( + Staking::nominate(RuntimeOrigin::signed(last_nominator), vec![1]), + Error::::TooManyNominators + ); + + // Re-nominate works fine + assert_ok!(Staking::nominate(RuntimeOrigin::signed(some_existing_nominator), vec![1])); + // Re-validate works fine + assert_ok!(Staking::validate( + RuntimeOrigin::signed(some_existing_validator), + ValidatorPrefs::default() + )); + + // No problem when we set to `None` again + assert_ok!(Staking::set_staking_configs( + RuntimeOrigin::root(), + ConfigOp::Noop, + ConfigOp::Noop, + ConfigOp::Remove, + ConfigOp::Remove, + ConfigOp::Noop, + ConfigOp::Noop, + ConfigOp::Noop, + )); + assert_ok!(Staking::nominate(RuntimeOrigin::signed(last_nominator), vec![1])); + assert_ok!(Staking::validate( + RuntimeOrigin::signed(last_validator), + ValidatorPrefs::default() + )); + }) + } +} diff --git a/substrate/frame/staking-async/src/tests/configs.rs b/substrate/frame/staking-async/src/tests/configs.rs new file mode 100644 index 0000000000000..99f68b8e211f4 --- /dev/null +++ b/substrate/frame/staking-async/src/tests/configs.rs @@ -0,0 +1,73 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use super::*; + +#[test] +fn set_staking_configs_works() { + ExtBuilder::default().build_and_execute(|| { + // setting works + assert_ok!(Staking::set_staking_configs( + RuntimeOrigin::root(), + ConfigOp::Set(1_500), + ConfigOp::Set(2_000), + ConfigOp::Set(10), + ConfigOp::Set(20), + ConfigOp::Set(Percent::from_percent(75)), + ConfigOp::Set(Zero::zero()), + ConfigOp::Set(Zero::zero()) + )); + assert_eq!(MinNominatorBond::::get(), 1_500); + assert_eq!(MinValidatorBond::::get(), 2_000); + assert_eq!(MaxNominatorsCount::::get(), Some(10)); + assert_eq!(MaxValidatorsCount::::get(), Some(20)); + assert_eq!(ChillThreshold::::get(), Some(Percent::from_percent(75))); + assert_eq!(MinCommission::::get(), Perbill::from_percent(0)); + assert_eq!(MaxStakedRewards::::get(), Some(Percent::from_percent(0))); + + // noop does nothing + assert_storage_noop!(assert_ok!(Staking::set_staking_configs( + RuntimeOrigin::root(), + ConfigOp::Noop, + ConfigOp::Noop, + ConfigOp::Noop, + ConfigOp::Noop, + ConfigOp::Noop, + ConfigOp::Noop, + ConfigOp::Noop + ))); + + // removing works + assert_ok!(Staking::set_staking_configs( + RuntimeOrigin::root(), + ConfigOp::Remove, + ConfigOp::Remove, + ConfigOp::Remove, + ConfigOp::Remove, + ConfigOp::Remove, + ConfigOp::Remove, + ConfigOp::Remove + )); + assert_eq!(MinNominatorBond::::get(), 0); + assert_eq!(MinValidatorBond::::get(), 0); + assert_eq!(MaxNominatorsCount::::get(), None); + assert_eq!(MaxValidatorsCount::::get(), None); + assert_eq!(ChillThreshold::::get(), None); + assert_eq!(MinCommission::::get(), Perbill::from_percent(0)); + assert_eq!(MaxStakedRewards::::get(), None); + }); +} diff --git a/substrate/frame/staking-async/src/tests/controller.rs b/substrate/frame/staking-async/src/tests/controller.rs new file mode 100644 index 0000000000000..400c3185fa943 --- /dev/null +++ b/substrate/frame/staking-async/src/tests/controller.rs @@ -0,0 +1,87 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::*; + +#[test] +fn change_controller_works() { + ExtBuilder::default().build_and_execute(|| { + let (stash, controller) = testing_utils::create_unique_stash_controller::( + 0, + 100, + RewardDestination::Staked, + false, + ) + .unwrap(); + + // ensure `stash` and `controller` are bonded as stash controller pair. + assert_eq!(Staking::bonded(&stash), Some(controller)); + + // `controller` can control `stash` who is initially a validator. + assert_ok!(Staking::chill(RuntimeOrigin::signed(controller))); + + // sets controller back to `stash`. + assert_ok!(Staking::set_controller(RuntimeOrigin::signed(stash))); + assert_eq!(Staking::bonded(&stash), Some(stash)); + + // fetch the ledger from storage and check if the controller is correct. + let ledger = Staking::ledger(StakingAccount::Stash(stash)).unwrap(); + assert_eq!(ledger.controller(), Some(stash)); + + // same if we fetch the ledger by controller. + let ledger = Staking::ledger(StakingAccount::Controller(stash)).unwrap(); + assert_eq!(ledger.controller, Some(stash)); + assert_eq!(ledger.controller(), Some(stash)); + + // the raw storage ledger's controller is always `None`. however, we can still fetch the + // correct controller with `ledger.controller()`. + let raw_ledger = >::get(&stash).unwrap(); + assert_eq!(raw_ledger.controller, None); + + // `controller` is no longer in control. 
`stash` is now controller. + assert_noop!( + Staking::validate(RuntimeOrigin::signed(controller), ValidatorPrefs::default()), + Error::::NotController, + ); + assert_ok!(Staking::validate(RuntimeOrigin::signed(stash), ValidatorPrefs::default())); + }) +} + +#[test] +fn change_controller_already_paired_once_stash() { + ExtBuilder::default().build_and_execute(|| { + // 11 and 11 are bonded as controller and stash respectively. + assert_eq!(Staking::bonded(&11), Some(11)); + + // 11 is initially a validator. + assert_ok!(Staking::chill(RuntimeOrigin::signed(11))); + + // Controller cannot change once matching with stash. + assert_noop!( + Staking::set_controller(RuntimeOrigin::signed(11)), + Error::::AlreadyPaired + ); + assert_eq!(Staking::bonded(&11), Some(11)); + + // 10 is no longer in control. + assert_noop!( + Staking::validate(RuntimeOrigin::signed(10), ValidatorPrefs::default()), + Error::::NotController, + ); + assert_ok!(Staking::validate(RuntimeOrigin::signed(11), ValidatorPrefs::default())); + }) +} diff --git a/substrate/frame/staking-async/src/tests/election_data_provider.rs b/substrate/frame/staking-async/src/tests/election_data_provider.rs new file mode 100644 index 0000000000000..ddc17560e5746 --- /dev/null +++ b/substrate/frame/staking-async/src/tests/election_data_provider.rs @@ -0,0 +1,875 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +use super::*; +use frame_election_provider_support::ElectionDataProvider; + +#[test] +fn set_minimum_active_stake_is_correct() { + ExtBuilder::default() + .nominate(false) + .add_staker(61, 2_000, StakerStatus::::Nominator(vec![21])) + .add_staker(71, 10, StakerStatus::::Nominator(vec![21])) + .add_staker(81, 50, StakerStatus::::Nominator(vec![21])) + .build_and_execute(|| { + // default bounds are unbounded. + assert_ok!(::electing_voters( + DataProviderBounds::default(), + 0 + )); + assert_eq!(MinimumActiveStake::::get(), 10); + + // remove staker with lower bond by limiting the number of voters and check + // `MinimumActiveStake` again after electing voters. + let bounds = ElectionBoundsBuilder::default().voters_count(5.into()).build(); + assert_ok!(::electing_voters(bounds.voters, 0)); + assert_eq!(MinimumActiveStake::::get(), 50); + }); +} + +#[test] +fn set_minimum_active_stake_lower_bond_works() { + // lower non-zero active stake below `MinNominatorBond` is the minimum active stake if + // it is selected as part of the npos voters. + ExtBuilder::default().has_stakers(true).nominate(true).build_and_execute(|| { + assert_eq!(MinNominatorBond::::get(), 1); + assert_eq!(::VoterList::count(), 4); + + assert_ok!(Staking::bond(RuntimeOrigin::signed(4), 5, RewardDestination::Staked,)); + assert_ok!(Staking::nominate(RuntimeOrigin::signed(4), vec![1])); + assert_eq!(::VoterList::count(), 5); + + let voters_before = + ::electing_voters(DataProviderBounds::default(), 0) + .unwrap(); + assert_eq!(MinimumActiveStake::::get(), 5); + + // update minimum nominator bond. + MinNominatorBond::::set(10); + assert_eq!(MinNominatorBond::::get(), 10); + // voter list still considers nominator 4 for voting, even though its active stake is + // lower than `MinNominatorBond`. 
+ assert_eq!(::VoterList::count(), 5); + + let voters = + ::electing_voters(DataProviderBounds::default(), 0) + .unwrap(); + assert_eq!(voters_before, voters); + + // minimum active stake is lower than `MinNominatorBond`. + assert_eq!(MinimumActiveStake::::get(), 5); + }); +} + +#[test] +fn set_minimum_active_bond_corrupt_state() { + ExtBuilder::default() + .has_stakers(true) + .nominate(true) + .add_staker(61, 2_000, StakerStatus::::Nominator(vec![21])) + .build_and_execute(|| { + assert_eq!(Staking::weight_of(&101), 500); + let voters = ::electing_voters( + DataProviderBounds::default(), + 0, + ) + .unwrap(); + assert_eq!(voters.len(), 5); + assert_eq!(MinimumActiveStake::::get(), 500); + + Session::roll_until_active_era(10); + assert_ok!(Staking::unbond(RuntimeOrigin::signed(101), 200)); + Session::roll_until_active_era(20); + assert_ok!(Staking::unbond(RuntimeOrigin::signed(101), 100)); + + // corrupt ledger state by lowering max unlocking chunks bounds. + MaxUnlockingChunks::set(1); + + let voters = ::electing_voters( + DataProviderBounds::default(), + 0, + ) + .unwrap(); + + // number of returned voters decreases since ledger entry of stash 101 is now + // corrupt. + assert_eq!(voters.len(), 4); + // minimum active stake does not take into consideration the corrupt entry. + assert_eq!(MinimumActiveStake::::get(), 2_000); + + // voter weight of corrupted ledger entry is 0. + assert_eq!(Staking::weight_of(&101), 0); + + // reset max unlocking chunks for try_state to pass. + MaxUnlockingChunks::set(32); + }) +} + +#[test] +fn voters_include_self_vote() { + ExtBuilder::default().nominate(false).build_and_execute(|| { + // default bounds are unbounded. 
+ assert!(>::iter().map(|(x, _)| x).all(|v| Staking::electing_voters( + DataProviderBounds::default(), + 0 + ) + .unwrap() + .into_iter() + .any(|(w, _, t)| { v == w && t[0] == w }))) + }) +} + +#[test] +#[should_panic] +#[cfg(debug_assertions)] +fn only_iterates_max_2_times_max_allowed_len() { + ExtBuilder::default() + .nominate(false) + // the best way to invalidate a bunch of nominators is to have them nominate a lot of + // ppl, but then lower the MaxNomination limit. + .add_staker(61, 2_000, StakerStatus::::Nominator(vec![21, 22, 23, 24, 25])) + .add_staker(71, 2_000, StakerStatus::::Nominator(vec![21, 22, 23, 24, 25])) + .add_staker(81, 2_000, StakerStatus::::Nominator(vec![21, 22, 23, 24, 25])) + .build_and_execute(|| { + let bounds_builder = ElectionBoundsBuilder::default(); + // all voters ordered by stake, + assert_eq!( + ::VoterList::iter().collect::>(), + vec![61, 71, 81, 11, 21, 31] + ); + + AbsoluteMaxNominations::set(2); + + // we want 2 voters now, and in maximum we allow 4 iterations. This is what happens: + // 61 is pruned; + // 71 is pruned; + // 81 is pruned; + // 11 is taken; + // we finish since the 2x limit is reached. + assert_eq!( + Staking::electing_voters(bounds_builder.voters_count(2.into()).build().voters, 0) + .unwrap() + .iter() + .map(|(stash, _, _)| stash) + .copied() + .collect::>(), + vec![11], + ); + }); +} + +#[test] +fn respects_snapshot_count_limits() { + ExtBuilder::default() + .set_status(41, StakerStatus::Validator) + .build_and_execute(|| { + // sum of all nominators who'd be voters (1), plus the self-votes (4). + assert_eq!(::VoterList::count(), 5); + + let bounds_builder = ElectionBoundsBuilder::default(); + + // if voter count limit is less.. + assert_eq!( + Staking::electing_voters(bounds_builder.voters_count(1.into()).build().voters, 0) + .unwrap() + .len(), + 1 + ); + + // if voter count limit is equal.. 
+ assert_eq!( + Staking::electing_voters(bounds_builder.voters_count(5.into()).build().voters, 0) + .unwrap() + .len(), + 5 + ); + + // if voter count limit is more. + assert_eq!( + Staking::electing_voters(bounds_builder.voters_count(55.into()).build().voters, 0) + .unwrap() + .len(), + 5 + ); + + // if target count limit is more.. + assert_eq!( + Staking::electable_targets( + bounds_builder.targets_count(6.into()).build().targets, + 0, + ) + .unwrap() + .len(), + 4 + ); + + // if target count limit is equal.. + assert_eq!( + Staking::electable_targets( + bounds_builder.targets_count(4.into()).build().targets, + 0, + ) + .unwrap() + .len(), + 4 + ); + + // if target limit count is less, then we return an error. + assert_eq!( + Staking::electable_targets( + bounds_builder.targets_count(1.into()).build().targets, + 0 + ) + .unwrap() + .len(), + 1, + ); + }); +} + +#[test] +fn respects_snapshot_size_limits() { + ExtBuilder::default().build_and_execute(|| { + // voters: set size bounds that allows only for 1 voter. + let bounds = ElectionBoundsBuilder::default().voters_size(26.into()).build(); + let elected = Staking::electing_voters(bounds.voters, 0).unwrap(); + assert!(elected.encoded_size() == 26 as usize); + let prev_len = elected.len(); + + // larger size bounds means more quota for voters. + let bounds = ElectionBoundsBuilder::default().voters_size(100.into()).build(); + let elected = Staking::electing_voters(bounds.voters, 0).unwrap(); + assert!(elected.encoded_size() <= 100 as usize); + assert!(elected.len() > 1 && elected.len() > prev_len); + + // targets: set size bounds that allows for only one target to fit in the snapshot. + let bounds = ElectionBoundsBuilder::default().targets_size(10.into()).build(); + let elected = Staking::electable_targets(bounds.targets, 0).unwrap(); + assert!(elected.encoded_size() == 9 as usize); + let prev_len = elected.len(); + + // larger size bounds means more space for targets. 
+ let bounds = ElectionBoundsBuilder::default().targets_size(100.into()).build(); + let elected = Staking::electable_targets(bounds.targets, 0).unwrap(); + assert!(elected.encoded_size() <= 100 as usize); + assert!(elected.len() > 1 && elected.len() > prev_len); + }); +} + +#[test] +fn nomination_quota_checks_at_nominate_works() { + ExtBuilder::default().nominate(false).build_and_execute(|| { + // stash bond of 222 has a nomination quota of 2 targets. + bond(61, 222); + assert_eq!(Staking::api_nominations_quota(222), 2); + + // nominating with targets below the nomination quota works. + assert_ok!(Staking::nominate(RuntimeOrigin::signed(61), vec![11])); + assert_ok!(Staking::nominate(RuntimeOrigin::signed(61), vec![11, 12])); + + // nominating with targets above the nomination quota returns error. + assert_noop!( + Staking::nominate(RuntimeOrigin::signed(61), vec![11, 12, 13]), + Error::::TooManyTargets + ); + }); +} + +#[test] +#[should_panic] +#[cfg(debug_assertions)] +fn change_of_absolute_max_nominations() { + use frame_election_provider_support::ElectionDataProvider; + ExtBuilder::default() + .add_staker(61, 10, StakerStatus::Nominator(vec![1])) + .add_staker(71, 10, StakerStatus::Nominator(vec![1, 2, 3])) + .balance_factor(10) + .build_and_execute(|| { + // pre-condition + assert_eq!(AbsoluteMaxNominations::get(), 16); + + assert_eq!( + Nominators::::iter() + .map(|(k, n)| (k, n.targets.len())) + .collect::>(), + vec![(101, 2), (71, 3), (61, 1)] + ); + + // default bounds are unbounded. + let bounds = DataProviderBounds::default(); + + // 3 validators and 3 nominators + assert_eq!(Staking::electing_voters(bounds, 0).unwrap().len(), 3 + 3); + + // abrupt change from 16 to 4, everyone should be fine. 
+ AbsoluteMaxNominations::set(4); + + assert_eq!( + Nominators::::iter() + .map(|(k, n)| (k, n.targets.len())) + .collect::>(), + vec![(101, 2), (71, 3), (61, 1)] + ); + assert_eq!(Staking::electing_voters(bounds, 0).unwrap().len(), 3 + 3); + + // No one can be chilled on account of non-decodable keys. + for k in Nominators::::iter_keys() { + assert_noop!( + Staking::chill_other(RuntimeOrigin::signed(1), k), + Error::::CannotChillOther + ); + } + + // abrupt change from 4 to 3, everyone should be fine. + AbsoluteMaxNominations::set(3); + + assert_eq!( + Nominators::::iter() + .map(|(k, n)| (k, n.targets.len())) + .collect::>(), + vec![(101, 2), (71, 3), (61, 1)] + ); + assert_eq!(Staking::electing_voters(bounds, 0).unwrap().len(), 3 + 3); + + // As before, no one can be chilled on account of non-decodable keys. + for k in Nominators::::iter_keys() { + assert_noop!( + Staking::chill_other(RuntimeOrigin::signed(1), k), + Error::::CannotChillOther + ); + } + + // abrupt change from 3 to 2, this should cause some nominators to be non-decodable, + // and thus non-existent unless they update. + AbsoluteMaxNominations::set(2); + + assert_eq!( + Nominators::::iter() + .map(|(k, n)| (k, n.targets.len())) + .collect::>(), + vec![(101, 2), (61, 1)] + ); + + // 101 and 61 still cannot be chilled by someone else. + for k in [101, 61].iter() { + assert_noop!( + Staking::chill_other(RuntimeOrigin::signed(1), *k), + Error::::CannotChillOther + ); + } + + // 71 is still in storage.. + assert!(Nominators::::contains_key(71)); + // but its value cannot be decoded and default is returned. + assert!(Nominators::::get(71).is_none()); + + assert_eq!(Staking::electing_voters(bounds, 0).unwrap().len(), 3 + 2); + assert!(Nominators::::contains_key(101)); + + // abrupt change from 2 to 1, this should cause some nominators to be non-decodable, + // and thus non-existent unless they update. 
+ AbsoluteMaxNominations::set(1); + + assert_eq!( + Nominators::::iter() + .map(|(k, n)| (k, n.targets.len())) + .collect::>(), + vec![(61, 1)] + ); + + // 61 *still* cannot be chilled by someone else. + assert_noop!( + Staking::chill_other(RuntimeOrigin::signed(1), 61), + Error::::CannotChillOther + ); + + assert!(Nominators::::contains_key(71)); + assert!(Nominators::::contains_key(61)); + assert!(Nominators::::get(71).is_none()); + assert!(Nominators::::get(61).is_some()); + assert_eq!(Staking::electing_voters(bounds, 0).unwrap().len(), 3 + 1); + + // now one of them can revive themselves by re-nominating to a proper value. + assert_ok!(Staking::nominate(RuntimeOrigin::signed(71), vec![1])); + assert_eq!( + Nominators::::iter() + .map(|(k, n)| (k, n.targets.len())) + .collect::>(), + vec![(71, 1), (61, 1)] + ); + + // or they can be chilled by any account. + assert!(Nominators::::contains_key(101)); + assert!(Nominators::::get(101).is_none()); + assert_ok!(Staking::chill_other(RuntimeOrigin::signed(71), 101)); + assert_eq!(*staking_events().last().unwrap(), Event::Chilled { stash: 101 }); + assert!(!Nominators::::contains_key(101)); + assert!(Nominators::::get(101).is_none()); + }) +} + +#[test] +fn nomination_quota_max_changes_decoding() { + use frame_election_provider_support::ElectionDataProvider; + ExtBuilder::default() + .add_staker(60, 10, StakerStatus::Nominator(vec![1])) + .add_staker(70, 10, StakerStatus::Nominator(vec![1, 2, 3])) + .add_staker(30, 10, StakerStatus::Nominator(vec![1, 2, 3, 4])) + .add_staker(50, 10, StakerStatus::Nominator(vec![1, 2, 3, 4])) + .balance_factor(11) + .build_and_execute(|| { + // pre-condition. 
+ assert_eq!(MaxNominationsOf::::get(), 16); + + let unbonded_election = DataProviderBounds::default(); + + assert_eq!( + Nominators::::iter() + .map(|(k, n)| (k, n.targets.len())) + .collect::>(), + vec![(70, 3), (101, 2), (50, 4), (30, 4), (60, 1)] + ); + + // 4 validators and 4 nominators + assert_eq!(Staking::electing_voters(unbonded_election, 0).unwrap().len(), 4 + 4); + }); +} + +#[test] +fn api_nominations_quota_works() { + ExtBuilder::default().build_and_execute(|| { + assert_eq!(Staking::api_nominations_quota(10), MaxNominationsOf::::get()); + assert_eq!(Staking::api_nominations_quota(333), MaxNominationsOf::::get()); + assert_eq!(Staking::api_nominations_quota(222), 2); + assert_eq!(Staking::api_nominations_quota(111), 1); + }) +} + +#[test] +fn lazy_quota_npos_voters_works_above_quota() { + ExtBuilder::default() + .nominate(false) + .add_staker( + 61, + 300, // 300 bond has 16 nomination quota. + StakerStatus::::Nominator(vec![21, 22, 23, 24, 25]), + ) + .build_and_execute(|| { + // unbond 78 from stash 60 so that it's bonded balance is 222, which has a lower + // nomination quota than at nomination time (max 2 targets). + assert_ok!(Staking::unbond(RuntimeOrigin::signed(61), 78)); + assert_eq!(Staking::api_nominations_quota(300 - 78), 2); + + // even through 61 has nomination quota of 2 at the time of the election, all the + // nominations (5) will be used. + assert_eq!( + Staking::electing_voters(DataProviderBounds::default(), 0) + .unwrap() + .iter() + .map(|(stash, _, targets)| (*stash, targets.len())) + .collect::>(), + vec![(11, 1), (21, 1), (31, 1), (61, 5)], + ); + }); +} + +#[test] +fn nominations_quota_limits_size_work() { + ExtBuilder::default() + .nominate(false) + .add_staker(71, 333, StakerStatus::::Nominator(vec![16, 15, 14, 13, 12, 11, 10])) + .build_and_execute(|| { + // nominations of controller 70 won't be added due to voter size limit exceeded. 
+ let bounds = ElectionBoundsBuilder::default().voters_size(100.into()).build(); + assert_eq!( + Staking::electing_voters(bounds.voters, 0) + .unwrap() + .iter() + .map(|(stash, _, targets)| (*stash, targets.len())) + .collect::>(), + vec![(11, 1), (21, 1), (31, 1)], + ); + + assert_eq!( + *staking_events().last().unwrap(), + Event::SnapshotVotersSizeExceeded { size: 75 } + ); + + // however, if the election voter size bounds were larger, the snapshot would + // include the electing voters of 70. + let bounds = ElectionBoundsBuilder::default().voters_size(1_000.into()).build(); + assert_eq!( + Staking::electing_voters(bounds.voters, 0) + .unwrap() + .iter() + .map(|(stash, _, targets)| (*stash, targets.len())) + .collect::>(), + vec![(11, 1), (21, 1), (31, 1), (71, 7)], + ); + }); +} + +mod sorted_list_provider { + use super::*; + use frame_election_provider_support::SortedListProvider; + + #[test] + fn re_nominate_does_not_change_counters_or_list() { + ExtBuilder::default().nominate(true).build_and_execute(|| { + // given + let pre_insert_voter_count = + (Nominators::::count() + Validators::::count()) as u32; + assert_eq!(::VoterList::count(), pre_insert_voter_count); + + assert_eq!( + ::VoterList::iter().collect::>(), + vec![11, 21, 31, 101] + ); + + // when account 101 renominates + assert_ok!(Staking::nominate(RuntimeOrigin::signed(101), vec![41])); + + // then counts don't change + assert_eq!(::VoterList::count(), pre_insert_voter_count); + // and the list is the same + assert_eq!( + ::VoterList::iter().collect::>(), + vec![11, 21, 31, 101] + ); + }); + } + + #[test] + fn re_validate_does_not_change_counters_or_list() { + ExtBuilder::default().nominate(false).build_and_execute(|| { + // given + let pre_insert_voter_count = + (Nominators::::count() + Validators::::count()) as u32; + assert_eq!(::VoterList::count(), pre_insert_voter_count); + + assert_eq!(::VoterList::iter().collect::>(), vec![11, 21, 31]); + + // when account 11 re-validates + 
assert_ok!(Staking::validate(RuntimeOrigin::signed(11), Default::default())); + + // then counts don't change + assert_eq!(::VoterList::count(), pre_insert_voter_count); + // and the list is the same + assert_eq!(::VoterList::iter().collect::>(), vec![11, 21, 31]); + }); + } +} + +mod paged_snapshot { + use super::*; + + #[test] + fn target_snapshot_works() { + ExtBuilder::default() + .nominate(true) + .set_status(41, StakerStatus::Validator) + .set_status(51, StakerStatus::Validator) + .set_status(101, StakerStatus::Idle) + .build_and_execute(|| { + // all registered validators. + let all_targets = vec![51, 31, 41, 21, 11]; + assert_eq_uvec!( + ::TargetList::iter().collect::>(), + all_targets, + ); + + // 3 targets per page. + let bounds = + ElectionBoundsBuilder::default().targets_count(3.into()).build().targets; + + let targets = + ::electable_targets(bounds, 0).unwrap(); + assert_eq_uvec!(targets, all_targets.iter().take(3).cloned().collect::>()); + + // emulates a no bounds target snapshot request. + let bounds = + ElectionBoundsBuilder::default().targets_count(u32::MAX.into()).build().targets; + + let single_page_targets = + ::electable_targets(bounds, 0).unwrap(); + + // complete set of paged targets is the same as single page, no bounds set of + // targets. + assert_eq_uvec!(all_targets, single_page_targets); + }) + } + + #[test] + fn target_snaposhot_multi_page_redundant() { + ExtBuilder::default().build_and_execute(|| { + let all_targets = vec![31, 21, 11]; + assert_eq_uvec!(::TargetList::iter().collect::>(), all_targets,); + + // no bounds. + let bounds = + ElectionBoundsBuilder::default().targets_count(u32::MAX.into()).build().targets; + + // target snapshot supports only single-page, thus it is redundant what's the page index + // requested. 
+ let snapshot = Staking::electable_targets(bounds, 0).unwrap(); + assert!( + snapshot == all_targets && + snapshot == Staking::electable_targets(bounds, 1).unwrap() && + snapshot == Staking::electable_targets(bounds, 2).unwrap() && + snapshot == Staking::electable_targets(bounds, u32::MAX).unwrap(), + ); + }) + } + + #[test] + fn voter_snapshot_works() { + ExtBuilder::default() + .nominate(true) + .set_status(51, StakerStatus::Validator) + .set_status(41, StakerStatus::Nominator(vec![51])) + .set_status(101, StakerStatus::Validator) + .build_and_execute(|| { + let bounds = ElectionBoundsBuilder::default().voters_count(3.into()).build().voters; + assert_eq!( + ::VoterList::iter() + .collect::>() + .into_iter() + .map(|v| (v, ::VoterList::get_score(&v).unwrap())) + .collect::>(), + vec![(41, 4000), (51, 5000), (11, 1000), (21, 1000), (31, 500), (101, 500)], + ); + + let mut all_voters = vec![]; + + let voters_page_3 = ::electing_voters(bounds, 3) + .unwrap() + .into_iter() + .map(|(a, _, _)| a) + .collect::>(); + all_voters.extend(voters_page_3.clone()); + + assert_eq!(voters_page_3, vec![41, 51, 11]); + assert_eq!(VoterSnapshotStatus::::get(), SnapshotStatus::Ongoing(11)); + + let voters_page_2 = ::electing_voters(bounds, 2) + .unwrap() + .into_iter() + .map(|(a, _, _)| a) + .collect::>(); + all_voters.extend(voters_page_2.clone()); + + assert_eq!(voters_page_2, vec![21, 31, 101]); + + // all voters in the list have been consumed. + assert_eq!(VoterSnapshotStatus::::get(), SnapshotStatus::Consumed); + + // thus page 1 and 0 are empty. + assert!(::electing_voters(bounds, 1) + .unwrap() + .is_empty()); + assert_eq!(VoterSnapshotStatus::::get(), SnapshotStatus::Consumed); + + assert!(::electing_voters(bounds, 0) + .unwrap() + .is_empty()); + + // last page has been requested, reset the snapshot status to waiting. + assert_eq!(VoterSnapshotStatus::::get(), SnapshotStatus::Waiting); + + // now request 1 page with bounds where all registered voters fit. 
u32::MAX + // emulates a no bounds request. + let bounds = + ElectionBoundsBuilder::default().voters_count(u32::MAX.into()).build().targets; + + let single_page_voters = + ::electing_voters(bounds, 0) + .unwrap() + .into_iter() + .map(|(a, _, _)| a) + .collect::>(); + + // complete set of paged voters is the same as single page, no bounds set of + // voters. + assert_eq!(all_voters, single_page_voters); + }) + } + + #[test] + fn voter_list_locked_during_multi_page_snapshot() { + ExtBuilder::default() + .nominate(true) + .set_status(51, StakerStatus::Validator) + .set_status(41, StakerStatus::Nominator(vec![51])) + .set_status(101, StakerStatus::Validator) + .build_and_execute(|| { + let bounds = ElectionBoundsBuilder::default().voters_count(2.into()).build().voters; + assert_eq!( + ::VoterList::iter() + .collect::>() + .into_iter() + .map(|v| (v, ::VoterList::get_score(&v).unwrap())) + .collect::>(), + vec![(41, 4000), (51, 5000), (11, 1000), (21, 1000), (31, 500), (101, 500)], + ); + + // initially not locked + assert_eq!(pallet_bags_list::Lock::::get(), None); + + let voters_page_3 = ::electing_voters(bounds, 3) + .unwrap() + .into_iter() + .map(|(a, _, _)| a) + .collect::>(); + + assert_eq!(voters_page_3, vec![41, 51]); + assert_eq!(VoterSnapshotStatus::::get(), SnapshotStatus::Ongoing(51)); + assert_eq!(pallet_bags_list::Lock::::get(), Some(())); + + hypothetically!({}); + + let voters_page_2 = ::electing_voters(bounds, 2) + .unwrap() + .into_iter() + .map(|(a, _, _)| a) + .collect::>(); + + // still locked + assert_eq!(voters_page_2, vec![11, 21]); + assert_eq!(VoterSnapshotStatus::::get(), SnapshotStatus::Ongoing(21)); + assert_eq!(pallet_bags_list::Lock::::get(), Some(())); + + let voters_page_1 = ::electing_voters(bounds, 1) + .unwrap() + .into_iter() + .map(|(a, _, _)| a) + .collect::>(); + + // consumed, and we already unlock + assert_eq!(voters_page_1, vec![31, 101]); + assert_eq!(VoterSnapshotStatus::::get(), SnapshotStatus::Consumed); + 
assert_eq!(pallet_bags_list::Lock::::get(), None); + + // calling page zero will unlock us. + assert!(::electing_voters(bounds, 0) + .unwrap() + .is_empty()); + + assert_eq!(VoterSnapshotStatus::::get(), SnapshotStatus::Waiting); + assert_eq!(pallet_bags_list::Lock::::get(), None); + }) + } + + #[test] + fn voter_list_not_updated_when_locked() { + ExtBuilder::default() + .nominate(true) + .set_status(51, StakerStatus::Validator) + .set_status(41, StakerStatus::Nominator(vec![51])) + .set_status(101, StakerStatus::Validator) + .build_and_execute(|| { + let bounds = ElectionBoundsBuilder::default().voters_count(2.into()).build().voters; + assert_eq!( + ::VoterList::iter() + .collect::>() + .into_iter() + .map(|v| (v, ::VoterList::get_score(&v).unwrap())) + .collect::>(), + vec![(41, 4000), (51, 5000), (11, 1000), (21, 1000), (31, 500), (101, 500)], + ); + + // initial bag of 51 + assert_eq!( + pallet_bags_list::ListNodes::::get(51) + .unwrap() + .bag_upper, + 10_000 + ); + + // original bag of 11 + assert_eq!( + pallet_bags_list::ListNodes::::get(11) + .unwrap() + .bag_upper, + 1000 + ); + + // initially not locked + assert_eq!(pallet_bags_list::Lock::::get(), None); + + let voters_page_3 = ::electing_voters(bounds, 3) + .unwrap() + .into_iter() + .map(|(a, _, _)| a) + .collect::>(); + + assert_eq!(voters_page_3, vec![41, 51]); + assert_eq!(VoterSnapshotStatus::::get(), SnapshotStatus::Ongoing(51)); + assert_eq!(pallet_bags_list::Lock::::get(), Some(())); + + // 51 who is already part of the list might want to unbond. They are already in the + // snapshot, and their position is not updated + hypothetically!({ + assert_ok!(Staking::unbond(RuntimeOrigin::signed(51), 500)); + // they are still in the original bag + assert_eq!( + pallet_bags_list::ListNodes::::get(51) + .unwrap() + .bag_upper, + 10_000 + ); + }); + + // 11 who is not part of the snapshot yet might want to bond a lot extra, this is + // not reflected in this election. 
+ hypothetically!({ + crate::asset::set_stakeable_balance::(&11, 10000); + assert_ok!(Staking::bond_extra(RuntimeOrigin::signed(11), 5000)); + // they are still in the original bag + assert_eq!( + pallet_bags_list::ListNodes::::get(11) + .unwrap() + .bag_upper, + 1000 + ); + }); + }) + } +} + +#[test] +fn from_most_staked_to_least_staked() { + ExtBuilder::default() + .nominate(true) + .set_status(51, StakerStatus::Validator) + .set_status(41, StakerStatus::Nominator(vec![51])) + .set_status(101, StakerStatus::Validator) + .set_stake(41, 11000) + .set_stake(51, 2500) + .set_stake(101, 35) + .build_and_execute(|| { + assert_eq!(THRESHOLDS.to_vec(), [10, 20, 30, 40, 50, 60, 1_000, 2_000, 10_000]); + + assert_eq!( + ::VoterList::iter() + .collect::>() + .into_iter() + .map(|v| (v, ::VoterList::get_score(&v).unwrap())) + .collect::>(), + vec![(41, 11000), (51, 2500), (11, 1000), (21, 1000), (31, 500), (101, 35)], + ); + }); +} diff --git a/substrate/frame/staking-async/src/tests/election_provider.rs b/substrate/frame/staking-async/src/tests/election_provider.rs new file mode 100644 index 0000000000000..a91b82c1f2731 --- /dev/null +++ b/substrate/frame/staking-async/src/tests/election_provider.rs @@ -0,0 +1,994 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use super::*; +use crate::session_rotation::{EraElectionPlanner, Eras}; +use frame_support::assert_ok; +use sp_npos_elections::Support; +use substrate_test_utils::assert_eq_uvec; + +use crate::tests::session_mock::ReceivedValidatorSets; + +#[test] +fn planning_era_offset_less_works() { + // same as `basic_setup_sessions_per_era`, but notice how `PagedElectionProceeded` happens + // one session later, and planning era is incremented one session later + ExtBuilder::default() + .session_per_era(6) + .planning_era_offset(0) + .no_flush_events() + .build_and_execute(|| { + // this essentially makes the session duration 7, because the mock session will buffer + // for one session before activating the era. + assert_eq!(Session::current_index(), 7); + assert_eq!(active_era(), 1); + + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::SessionRotated { starting_session: 1, active_era: 0, planned_era: 0 }, + Event::SessionRotated { starting_session: 2, active_era: 0, planned_era: 0 }, + Event::SessionRotated { starting_session: 3, active_era: 0, planned_era: 0 }, + Event::SessionRotated { starting_session: 4, active_era: 0, planned_era: 0 }, + Event::SessionRotated { starting_session: 5, active_era: 0, planned_era: 1 }, + Event::PagedElectionProceeded { page: 0, result: Ok(2) }, + Event::SessionRotated { starting_session: 6, active_era: 0, planned_era: 1 }, + Event::EraPaid { era_index: 0, validator_payout: 17500, remainder: 17500 }, + Event::SessionRotated { starting_session: 7, active_era: 1, planned_era: 1 } + ] + ); + + Session::roll_until_active_era(2); + assert_eq!(Session::current_index(), 14); + assert_eq!(active_era(), 2); + + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::SessionRotated { starting_session: 8, active_era: 1, planned_era: 1 }, + Event::SessionRotated { starting_session: 9, active_era: 1, planned_era: 1 }, + Event::SessionRotated { starting_session: 10, active_era: 1, planned_era: 1 }, + Event::SessionRotated { 
starting_session: 11, active_era: 1, planned_era: 1 }, + Event::SessionRotated { starting_session: 12, active_era: 1, planned_era: 2 }, + Event::PagedElectionProceeded { page: 0, result: Ok(2) }, + Event::SessionRotated { starting_session: 13, active_era: 1, planned_era: 2 }, + Event::EraPaid { era_index: 1, validator_payout: 17500, remainder: 17500 }, + Event::SessionRotated { starting_session: 14, active_era: 2, planned_era: 2 } + ] + ); + }); +} + +#[test] +fn planning_era_offset_more_works() { + ExtBuilder::default() + .session_per_era(6) + .planning_era_offset(2) + .no_flush_events() + .build_and_execute(|| { + // This effectively makes the era one session shorter. + assert_eq!(Session::current_index(), 5); + assert_eq!(active_era(), 1); + + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::SessionRotated { starting_session: 1, active_era: 0, planned_era: 0 }, + Event::SessionRotated { starting_session: 2, active_era: 0, planned_era: 0 }, + Event::SessionRotated { starting_session: 3, active_era: 0, planned_era: 1 }, + Event::PagedElectionProceeded { page: 0, result: Ok(2) }, + Event::SessionRotated { starting_session: 4, active_era: 0, planned_era: 1 }, + Event::EraPaid { era_index: 0, validator_payout: 12500, remainder: 12500 }, + Event::SessionRotated { starting_session: 5, active_era: 1, planned_era: 1 } + ] + ); + + Session::roll_until_active_era(2); + assert_eq!(Session::current_index(), 10); + assert_eq!(active_era(), 2); + + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::SessionRotated { starting_session: 6, active_era: 1, planned_era: 1 }, + Event::SessionRotated { starting_session: 7, active_era: 1, planned_era: 1 }, + Event::SessionRotated { starting_session: 8, active_era: 1, planned_era: 2 }, + Event::PagedElectionProceeded { page: 0, result: Ok(2) }, + Event::SessionRotated { starting_session: 9, active_era: 1, planned_era: 2 }, + Event::EraPaid { era_index: 1, validator_payout: 12500, remainder: 12500 }, + 
Event::SessionRotated { starting_session: 10, active_era: 2, planned_era: 2 } + ] + ); + }); +} + +#[test] +fn new_era_elects_correct_number_of_validators() { + ExtBuilder::default().nominate(true).validator_count(1).build_and_execute(|| { + assert_eq!(ValidatorCount::::get(), 1); + assert_eq!(session_validators().len(), 1); + }) +} + +#[test] +fn less_than_needed_candidates_works() { + ExtBuilder::default().validator_count(4).nominate(false).build_and_execute(|| { + assert_eq_uvec!(Session::validators(), vec![31, 21, 11]); + Session::roll_until_active_era(2); + + // Previous set is selected. + assert_eq_uvec!(Session::validators(), vec![31, 21, 11]); + + // Only has self votes. + assert!(ErasStakersPaged::::iter_prefix_values((active_era(),)) + .all(|exposure| exposure.others.is_empty())); + }); +} + +mod paged_exposures { + use super::*; + + #[test] + fn can_page_exposure() { + let mut others: Vec> = vec![]; + let mut total_stake: Balance = 0; + // 19 nominators + for i in 1..20 { + let individual_stake: Balance = 100 * i as Balance; + others.push(IndividualExposure { who: i, value: individual_stake }); + total_stake += individual_stake; + } + let own_stake: Balance = 500; + total_stake += own_stake; + assert_eq!(total_stake, 19_500); + // build full exposure set + let exposure: Exposure = + Exposure { total: total_stake, own: own_stake, others }; + + // when + let (exposure_metadata, exposure_page): ( + PagedExposureMetadata, + Vec>, + ) = exposure.clone().into_pages(3); + + // then + // 7 pages of nominators. + assert_eq!(exposure_page.len(), 7); + assert_eq!(exposure_metadata.page_count, 7); + // first page stake = 100 + 200 + 300 + assert!(matches!(exposure_page[0], ExposurePage { page_total: 600, .. })); + // second page stake = 0 + 400 + 500 + 600 + assert!(matches!(exposure_page[1], ExposurePage { page_total: 1500, .. 
})); + // verify overview has the total + assert_eq!(exposure_metadata.total, 19_500); + // verify total stake is same as in the original exposure. + assert_eq!( + exposure_page.iter().map(|a| a.page_total).reduce(|a, b| a + b).unwrap(), + 19_500 - exposure_metadata.own + ); + // verify own stake is correct + assert_eq!(exposure_metadata.own, 500); + // verify number of nominators are same as in the original exposure. + assert_eq!(exposure_page.iter().map(|a| a.others.len()).reduce(|a, b| a + b).unwrap(), 19); + assert_eq!(exposure_metadata.nominator_count, 19); + } + + #[test] + fn store_stakers_info_elect_works() { + ExtBuilder::default().exposures_page_size(2).build_and_execute(|| { + assert_eq!(MaxExposurePageSize::get(), 2); + + let exposure_one = Exposure { + total: 1000 + 700, + own: 1000, + others: vec![ + IndividualExposure { who: 101, value: 500 }, + IndividualExposure { who: 102, value: 100 }, + IndividualExposure { who: 103, value: 100 }, + ], + }; + + let exposure_two = Exposure { + total: 1000 + 1000, + own: 1000, + others: vec![ + IndividualExposure { who: 104, value: 500 }, + IndividualExposure { who: 105, value: 500 }, + ], + }; + + let exposure_three = Exposure { + total: 1000 + 500, + own: 1000, + others: vec![ + IndividualExposure { who: 110, value: 250 }, + IndividualExposure { who: 111, value: 250 }, + ], + }; + + let exposures_page_one = bounded_vec![(1, exposure_one), (2, exposure_two),]; + let exposures_page_two = bounded_vec![(1, exposure_three),]; + + // our exposures are stored for this era. + let current_era = current_era(); + + // stores exposure page with exposures of validator 1 and 2, returns exposed validator + // account id. + assert_eq!( + EraElectionPlanner::::store_stakers_info(exposures_page_one, current_era) + .to_vec(), + vec![1, 2] + ); + + // Stakers overview OK for validator 1 and 2. 
+ assert_eq!( + ErasStakersOverview::::get(current_era, &1).unwrap(), + PagedExposureMetadata { total: 1700, own: 1000, nominator_count: 3, page_count: 2 }, + ); + assert_eq!( + ErasStakersOverview::::get(current_era, &2).unwrap(), + PagedExposureMetadata { total: 2000, own: 1000, nominator_count: 2, page_count: 1 }, + ); + + // stores exposure page with exposures of validator 1, returns exposed validator + // account id. + assert_eq!( + EraElectionPlanner::::store_stakers_info(exposures_page_two, current_era) + .to_vec(), + vec![1] + ); + + // Stakers overview OK for validator 1. + assert_eq!( + ErasStakersOverview::::get(current_era, &1).unwrap(), + PagedExposureMetadata { total: 2200, own: 1000, nominator_count: 5, page_count: 3 }, + ); + + // validator 1 has 3 paged exposures. + assert!( + ErasStakersPaged::::iter_prefix_values((current_era, &1)).count() as u32 == + Eras::::exposure_page_count(current_era, &1) && + Eras::::exposure_page_count(current_era, &1) == 3 + ); + assert!(ErasStakersPaged::::get((current_era, &1, 0)).is_some()); + assert!(ErasStakersPaged::::get((current_era, &1, 1)).is_some()); + assert!(ErasStakersPaged::::get((current_era, &1, 2)).is_some()); + assert!(ErasStakersPaged::::get((current_era, &1, 3)).is_none()); + + // validator 2 has 1 paged exposures. 
+ assert!(ErasStakersPaged::::get((current_era, &2, 0)).is_some()); + assert!(ErasStakersPaged::::get((current_era, &2, 1)).is_none()); + assert_eq!(ErasStakersPaged::::iter_prefix_values((current_era, &2)).count(), 1); + + // exposures of validator 1 are the expected: + assert_eq!( + ErasStakersPaged::::get((current_era, &1, 0)).unwrap(), + ExposurePage { + page_total: 600, + others: vec![ + IndividualExposure { who: 101, value: 500 }, + IndividualExposure { who: 102, value: 100 } + ] + }, + ); + assert_eq!( + ErasStakersPaged::::get((current_era, &1, 1)).unwrap(), + ExposurePage { + page_total: 350, + others: vec![ + IndividualExposure { who: 103, value: 100 }, + IndividualExposure { who: 110, value: 250 } + ] + } + ); + assert_eq!( + ErasStakersPaged::::get((current_era, &1, 2)).unwrap(), + ExposurePage { + page_total: 250, + others: vec![IndividualExposure { who: 111, value: 250 }] + } + ); + + // exposures of validator 2. + assert_eq!( + ErasStakersPaged::::iter_prefix_values((current_era, &2)).collect::>(), + vec![ExposurePage { + page_total: 1000, + others: vec![ + IndividualExposure { who: 104, value: 500 }, + IndividualExposure { who: 105, value: 500 } + ] + }], + ); + }) + } +} + +mod electable_stashes { + use super::*; + + #[test] + fn add_electable_stashes_work() { + ExtBuilder::default().try_state(false).build_and_execute(|| { + MaxValidatorSet::set(5); + assert_eq!(MaxValidatorSet::get(), 5); + assert!(ElectableStashes::::get().is_empty()); + + // adds stashes without duplicates, do not overflow bounds. + assert_ok!(EraElectionPlanner::::add_electables(vec![1u64, 2, 3].into_iter())); + assert_eq!( + ElectableStashes::::get().into_inner().into_iter().collect::>(), + vec![1, 2, 3] + ); + + // adds with duplicates which are deduplicated implicitly, no not overflow bounds. 
+ assert_ok!(EraElectionPlanner::::add_electables(vec![1u64, 2, 4].into_iter())); + assert_eq!( + ElectableStashes::::get().into_inner().into_iter().collect::>(), + vec![1, 2, 3, 4] + ); + }) + } + + #[test] + fn add_electable_stashes_overflow_works() { + ExtBuilder::default().try_state(false).build_and_execute(|| { + MaxValidatorSet::set(5); + assert_eq!(MaxValidatorSet::get(), 5); + assert!(ElectableStashes::::get().is_empty()); + + // adds stashes so that bounds are overflown, fails and internal state changes so that + // all slots are filled. error will return the idx of the first account that was not + // included. + let expected_idx_not_included = 5; // stash 6. + assert_eq!( + EraElectionPlanner::::add_electables( + vec![1u64, 2, 3, 4, 5, 6, 7, 8].into_iter() + ), + Err(expected_idx_not_included) + ); + // the included were added to the electable stashes, despite the error. + assert_eq!( + ElectableStashes::::get().into_inner().into_iter().collect::>(), + vec![1, 2, 3, 4, 5] + ); + }) + } + + #[test] + fn overflow_electable_stashes_no_exposures_work() { + // ensures exposures are stored only for the electable stashes that fit within the + // electable stashes bounds in case of overflow. + ExtBuilder::default().try_state(false).build_and_execute(|| { + MaxValidatorSet::set(2); + assert!(ElectableStashes::::get().is_empty()); + + let supports = to_bounded_supports(vec![ + (1, Support { total: 100, voters: vec![(10, 1_000)] }), + (2, Support { total: 200, voters: vec![(20, 2_000)] }), + (3, Support { total: 300, voters: vec![(30, 3_000)] }), + (4, Support { total: 400, voters: vec![(40, 4_000)] }), + ]); + + // error due to bounds. + let expected_not_included = 2; + assert_eq!( + EraElectionPlanner::::do_elect_paged_inner(supports), + Err(expected_not_included) + ); + + // electable stashes have been collected to the max bounds despite the error. 
+ assert_eq!(ElectableStashes::::get().into_iter().collect::>(), vec![1, 2]); + + let exposure_exists = |acc, era| Eras::::get_full_exposure(era, &acc).total != 0; + + // exposures were only collected for electable stashes in bounds (1 and 2). + assert!(exposure_exists(1, 1)); + assert!(exposure_exists(2, 1)); + assert!(!exposure_exists(3, 1)); + assert!(!exposure_exists(4, 1)); + }) + } +} + +mod paged_on_initialize_era_election_planner { + use pallet_staking_async_rc_client::ValidatorSetReport; + + use super::*; + + #[test] + fn single_page_election_works() { + ExtBuilder::default() + // set desired targets to 3. + .validator_count(3) + .build_and_execute(|| { + // single page. + let pages: BlockNumber = EraElectionPlanner::::election_pages().into(); + assert_eq!(pages, 1); + + // we will start the next election at the start of block 20 + assert_eq!(System::block_number(), 15); + assert_eq!(PlanningEraOffset::get(), 1); + + // genesis validators are now in place. + assert_eq!(current_era(), 1); + assert_eq_uvec!(Session::validators(), vec![11, 21, 31]); + + // force unstake of 31 to ensure the election results of the next era are + // different than genesis. + assert_ok!(Staking::force_unstake(RuntimeOrigin::root(), 31, 0)); + + // use all registered validators as potential targets. + let expected_elected = vec![11, 21]; + ValidatorCount::::set(expected_elected.len() as u32); + + // 1. start signal is sent, election result will come next block. + Session::roll_until(20); + assert_eq!(NextElectionPage::::get(), None); + assert!(ElectableStashes::::get().is_empty()); + assert_eq!(VoterSnapshotStatus::::get(), SnapshotStatus::Waiting); + + // 2. starts preparing election at the (election_prediction - n_pages) block. + Session::roll_next(); + + // electing started, but since single-page, we don't set `NextElectionPage` at all. 
+ assert_eq!(NextElectionPage::::get(), None); + assert!(ElectableStashes::::get().is_empty()); + // Electable stashes are already drained and sent to RC client. + assert_eq!( + ReceivedValidatorSets::get_last(), + ValidatorSetReport { + id: 2, + leftover: false, + new_validator_set: vec![11, 21], + prune_up_to: None + } + ); + assert_eq!(VoterSnapshotStatus::::get(), SnapshotStatus::Waiting); + + assert_eq!(current_era(), 2); + assert_eq!(active_era(), 1); + + // check old exposures + assert_eq_uvec!( + era_exposures(1), + vec![ + ( + 11, + Exposure { + total: 1250, + own: 1000 as Balance, + others: vec![IndividualExposure { who: 101, value: 250 }] + } + ), + ( + 21, + Exposure { + total: 1250, + own: 1000 as Balance, + others: vec![IndividualExposure { who: 101, value: 250 }] + } + ), + (31, Exposure { total: 500, own: 500 as Balance, others: vec![] }), + ] + ); + + // check new exposures + assert_eq_uvec!( + era_exposures(2), + vec![ + ( + 11, + Exposure { + total: 1250, + own: 1000 as Balance, + others: vec![IndividualExposure { who: 101, value: 250 }] + } + ), + ( + 21, + Exposure { + total: 1250, + own: 1000 as Balance, + others: vec![IndividualExposure { who: 101, value: 250 }] + } + ), + ] + ); + + // era progressed and electable stashes have been served to session pallet. + assert_eq_uvec!(Session::validators(), vec![11, 21, 31]); + + // 4. in the next era, the validator set does not include 31 anymore which was + // unstaked. 
+ Session::roll_until_active_era(2); + + assert_eq_uvec!(Session::validators(), vec![11, 21]); + }) + } + + #[test] + fn multi_page_election_works() { + ExtBuilder::default() + .add_staker(61, 1000, StakerStatus::Validator) + .add_staker(71, 1000, StakerStatus::Validator) + .add_staker(81, 1000, StakerStatus::Validator) + .add_staker(91, 1000, StakerStatus::Validator) + .multi_page_election_provider(3) + .validator_count(6) + .election_bounds(3, 10) + .build_and_execute(|| { + // NOTE: we cannot really enforce MaxBackersPerWinner and ValidatorCount here as our + // election provider in the mock is rather dumb and cannot respect them atm. + + // we will start the next election at the start of block 20 + assert_eq!(System::block_number(), 15); + assert_eq!(PlanningEraOffset::get(), 1); + + // 1. election signal is sent here, + Session::roll_until(20); + assert_eq!( + staking_events_since_last_call(), + vec![Event::SessionRotated { + starting_session: 4, + active_era: 1, + planned_era: 2 + }] + ); + + assert_eq!(NextElectionPage::::get(), None); + assert_eq!(VoterSnapshotStatus::::get(), SnapshotStatus::Waiting); + assert!(ElectableStashes::::get().is_empty()); + + // page 2 fetched, next is 1 + Session::roll_until(21); + assert_eq!(NextElectionPage::::get(), Some(1)); + assert_eq!(VoterSnapshotStatus::::get(), SnapshotStatus::Ongoing(31)); + assert_eq!( + ElectableStashes::::get().into_iter().collect::>(), + vec![11, 21, 31] + ); + + assert_eq_uvec!( + era_exposures(2), + vec![ + ( + 11, + Exposure:: { + total: 1000, + own: 1000, + others: vec![] + } + ), + ( + 21, + Exposure:: { + total: 1000, + own: 1000, + others: vec![] + } + ), + ( + 31, + Exposure:: { total: 500, own: 500, others: vec![] } + ), + ] + ); + + // page 1, next is 0 + Session::roll_until(22); + // the electable stashes remain the same. 
+ assert_eq_uvec!( + ElectableStashes::::get().into_iter().collect::>(), + vec![11, 21, 31, 61, 71] + ); + assert_eq!(NextElectionPage::::get(), Some(0)); + assert_eq!(VoterSnapshotStatus::::get(), SnapshotStatus::Ongoing(71)); + + assert_eq_uvec!( + era_exposures(2), + vec![ + ( + 11, + Exposure:: { + total: 1250, + own: 1000, + others: vec![IndividualExposure { who: 101, value: 250 }] + } + ), + ( + 21, + Exposure:: { + total: 1250, + own: 1000, + others: vec![IndividualExposure { who: 101, value: 250 }] + } + ), + ( + 31, + Exposure:: { total: 500, own: 500, others: vec![] } + ), + ( + 71, + Exposure:: { + total: 1000, + own: 1000, + others: vec![] + } + ), + ( + 61, + Exposure:: { + total: 1000, + own: 1000, + others: vec![] + } + ) + ] + ); + + // fetch 0, done. + Session::roll_until(23); + // the electable stashes are now empty + assert!(ElectableStashes::::get().is_empty()); + assert_eq!(VoterSnapshotStatus::::get(), SnapshotStatus::Waiting); + assert_eq!(NextElectionPage::::get(), None); + + // check exposures + assert_eq_uvec!( + era_exposures(2), + vec![ + ( + 31, + Exposure:: { total: 500, own: 500, others: vec![] } + ), + ( + 21, + Exposure:: { + total: 1250, + own: 1000, + others: vec![IndividualExposure { who: 101, value: 250 }] + } + ), + ( + 81, + Exposure:: { + total: 1000, + own: 1000, + others: vec![] + } + ), + ( + 71, + Exposure:: { + total: 1000, + own: 1000, + others: vec![] + } + ), + ( + 91, + Exposure:: { + total: 1000, + own: 1000, + others: vec![] + } + ), + ( + 11, + Exposure:: { + total: 1250, + own: 1000, + others: vec![IndividualExposure { who: 101, value: 250 }] + } + ), + ( + 61, + Exposure:: { + total: 1000, + own: 1000, + others: vec![] + } + ) + ] + ); + + // and are sent + assert_eq!( + ReceivedValidatorSets::get_last(), + ValidatorSetReport { + id: 2, + leftover: false, + new_validator_set: vec![11, 21, 31, 61, 71, 81, 91], + prune_up_to: None + } + ); + + assert_eq!(NextElectionPage::::get(), None); + assert_eq!( + 
staking_events_since_last_call(), + vec![ + Event::PagedElectionProceeded { page: 2, result: Ok(3) }, + Event::PagedElectionProceeded { page: 1, result: Ok(2) }, + Event::PagedElectionProceeded { page: 0, result: Ok(2) } + ] + ); + + // go to activation of this validator set. + Session::roll_until_active_era(2); + + // the new era validators are the expected elected stashes. + assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 61, 71, 81, 91]); + }) + } + + // #[test] + // fn multi_page_exposure_and_multi_page_elect() { + // todo!("an election with 3 pages, with 4 backers per exposures, which are stored in + // MaxExposurePageSize = 6, ergo transformed into 2 pages of final exposure") } + + // #[test] + // fn multi_page_election_with_mulit_page_exposures_rewards_work() { + // ExtBuilder::default() + // .add_staker(61, 61, 1000, StakerStatus::Validator) + // .add_staker(71, 71, 1000, StakerStatus::Validator) + // .add_staker(1, 1, 5, StakerStatus::Nominator(vec![21, 31, 71])) + // .add_staker(2, 2, 5, StakerStatus::Nominator(vec![21, 31, 71])) + // .add_staker(3, 3, 5, StakerStatus::Nominator(vec![21, 31, 71])) + // .multi_page_election_provider(3) + // .max_winners_per_page(3) + // .exposures_page_size(2) + // .build_and_execute(|| { + // // election provider has 3 pages. + // let pages: BlockNumber = + // <::ElectionProvider as ElectionProvider>::Pages::get().into(); + // assert_eq!(pages, 3); + // // 3 max winners per page. + // let max_winners_page = <::ElectionProvider as + // ElectionProvider>::MaxWinnersPerPage::get(); assert_eq!(max_winners_page, 3); + + // // setup validator payee prefs and 10% commission. 
+ // for s in vec![21, 31, 71] { + // Payee::::insert(s, RewardDestination::Account(s)); + // let prefs = ValidatorPrefs { commission: Perbill::from_percent(10), + // ..Default::default() }; Validators::::insert(s, prefs.clone()); + // } + + // let init_balance_all = vec![21, 31, 71, 1, 2, 3].iter().fold(0, |mut acc, s| { + // acc += asset::total_balance::(&s); + // acc + // }); + + // // progress era. + // assert_eq!(current_era(), 0); + // start_active_era(1); + // assert_eq!(current_era(), 1); + // assert_eq!(Session::validators(), vec![21, 31, 71]); + + // // distribute reward, + // Pallet::::reward_by_ids(vec![(21, 50)]); + // Pallet::::reward_by_ids(vec![(31, 50)]); + // Pallet::::reward_by_ids(vec![(71, 50)]); + + // let total_payout = validator_payout_for(time_per_era()); + + // start_active_era(2); + + // // all the validators exposed in era 1 have two pages of exposures, since + // exposure // page size is 2. + // assert_eq!(MaxExposurePageSize::get(), 2); + // assert_eq!(Eras::::exposure_page_count(1, &21), 2); + // assert_eq!(Eras::::exposure_page_count(1, &31), 2); + // assert_eq!(Eras::::exposure_page_count(1, &71), 2); + + // make_all_reward_payment(1); + + // let balance_all = vec![21, 31, 71, 1, 2, 3].iter().fold(0, |mut acc, s| { + // acc += asset::total_balance::(&s); + // acc + // }); + + // assert_eq_error_rate!( + // total_payout, + // balance_all - init_balance_all, + // 4 + // ); + // }) + // } + + // #[test] + // fn multi_page_election_is_graceful() { + // // demonstrate that in a multi-page election, in some of the `elect(_)` calls fail we won't + // // bail right away. + // ExtBuilder::default().multi_page_election_provider(3).build_and_execute(|| { + // // load some exact data into the election provider, some of which are error or empty. + // let correct_results = ::GenesisElectionProvider::elect(0); + // CustomElectionSupports::set(Some(vec![ + // // page 0. + // correct_results.clone(), + // // page 1. 
+ // Err(onchain::Error::FailedToBound), + // // page 2. + // Ok(Default::default()), + // ])); + + // // genesis era. + // assert_eq!(current_era(), 0); + + // let next_election = + // ::next_election_prediction(System::block_number()); + // assert_eq!(next_election, 10); + + // // try-state sanity check. + // assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number())); + + // // 1. election prep hasn't started yet, election cursor and electable stashes are + // // not set yet. + // roll_to_block(6); + // assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number())); + // assert_eq!(NextElectionPage::::get(), None); + // assert!(ElectableStashes::::get().is_empty()); + + // // 2. starts preparing election at the (election_prediction - n_pages) block. + // // fetches lsp (i.e. 2). + // roll_to_block(7); + // assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number())); + + // // electing started at cursor is set once the election starts to be prepared. + // assert_eq!(NextElectionPage::::get(), Some(1)); + // // in elect(2) we won't collect any stashes yet. + // assert!(ElectableStashes::::get().is_empty()); + + // // 3. progress one block to fetch page 1. + // roll_to_block(8); + // assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number())); + + // // in elect(1) we won't collect any stashes yet. + // assert!(ElectableStashes::::get().is_empty()); + // // election cursor is updated + // assert_eq!(NextElectionPage::::get(), Some(0)); + + // // 4. progress one block to fetch mps (i.e. 0). + // roll_to_block(9); + // assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number())); + + // // some stashes come in. 
+ // assert_eq!( + // ElectableStashes::::get().into_iter().collect::>(), + // vec![11 as AccountId, 21] + // ); + // // cursor is now none + // assert_eq!(NextElectionPage::::get(), None); + + // // events thus far + // assert_eq!( + // staking_events_since_last_call(), + // vec![ + // Event::PagedElectionProceeded { page: 2, result: Ok(0) }, + // Event::PagedElectionProceeded { page: 1, result: Err(0) }, + // Event::PagedElectionProceeded { page: 0, result: Ok(2) } + // ] + // ); + + // // upon fetching page 0, the electing started will remain in storage until the + // // era rotates. + // assert_eq!(current_era(), 0); + + // // Next block the era will rotate. + // roll_to_block(10); + // assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number())); + + // // and all the metadata has been cleared up and ready for the next election. + // assert!(NextElectionPage::::get().is_none()); + // assert!(ElectableStashes::::get().is_empty()); + + // // and the overall staking worked fine. + // assert_eq!(staking_events_since_last_call(), vec![Event::StakersElected]); + // }) + // } + + // #[test] + // fn multi_page_election_fails_if_not_enough_validators() { + // // a graceful multi-page election still fails if not enough validators are provided. + // ExtBuilder::default().multi_page_election_provider(3).build_and_execute(|| { + // // load some exact data into the election provider, some of which are error or + // // empty. + // let correct_results = ::GenesisElectionProvider::elect(0); + // CustomElectionSupports::set(Some(vec![ + // // page 0. + // correct_results.clone(), + // // page 1. + // Err(onchain::Error::FailedToBound), + // // page 2. + // Ok(Default::default()), + // ])); + + // // genesis era. + // assert_eq!(current_era(), 0); + + // let next_election = + // ::next_election_prediction(System::block_number()); + // assert_eq!(next_election, 10); + + // // try-state sanity check. 
+ // assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number())); + + // // 1. election prep hasn't started yet, election cursor and electable stashes are + // // not set yet. + // roll_to_block(6); + // assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number())); + // assert_eq!(NextElectionPage::::get(), None); + // assert!(ElectableStashes::::get().is_empty()); + + // // 2. starts preparing election at the (election_prediction - n_pages) block. + // // fetches lsp (i.e. 2). + // roll_to_block(7); + // assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number())); + + // // electing started at cursor is set once the election starts to be prepared. + // assert_eq!(NextElectionPage::::get(), Some(1)); + // // in elect(2) we won't collect any stashes yet. + // assert!(ElectableStashes::::get().is_empty()); + + // // 3. progress one block to fetch page 1. + // roll_to_block(8); + // assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number())); + + // // in elect(1) we won't collect any stashes yet. + // assert!(ElectableStashes::::get().is_empty()); + // // election cursor is updated + // assert_eq!(NextElectionPage::::get(), Some(0)); + + // // 4. progress one block to fetch mps (i.e. 0). + // roll_to_block(9); + // assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number())); + + // // some stashes come in. + // assert_eq!( + // ElectableStashes::::get().into_iter().collect::>(), + // vec![11 as AccountId, 21] + // ); + // // cursor is now none + // assert_eq!(NextElectionPage::::get(), None); + + // // events thus far + // assert_eq!( + // staking_events_since_last_call(), + // vec![ + // Event::PagedElectionProceeded { page: 2, result: Ok(0) }, + // Event::PagedElectionProceeded { page: 1, result: Err(0) }, + // Event::PagedElectionProceeded { page: 0, result: Ok(2) } + // ] + // ); + + // // upon fetching page 0, the electing started will remain in storage until the + // // era rotates. 
+ // assert_eq!(current_era(), 0); + + // // Next block the era will rotate. + // roll_to_block(10); + // assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number())); + + // // and all the metadata has been cleared up and ready for the next election. + // assert!(NextElectionPage::::get().is_none()); + // assert!(ElectableStashes::::get().is_empty()); + + // // and the overall staking worked fine. + // assert_eq!(staking_events_since_last_call(), vec![Event::StakingElectionFailed]); + // }) + // } +} diff --git a/substrate/frame/staking-async/src/tests/era_rotation.rs b/substrate/frame/staking-async/src/tests/era_rotation.rs new file mode 100644 index 0000000000000..71ddd8f181d96 --- /dev/null +++ b/substrate/frame/staking-async/src/tests/era_rotation.rs @@ -0,0 +1,346 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::session_rotation::Eras; + +use super::*; + +#[test] +fn forcing_force_none() { + ExtBuilder::default().build_and_execute(|| { + ForceEra::::put(Forcing::ForceNone); + + Session::roll_to_next_session(); + assert_eq!( + staking_events_since_last_call(), + vec![Event::SessionRotated { starting_session: 4, active_era: 1, planned_era: 1 }] + ); + + Session::roll_to_next_session(); + assert_eq!( + staking_events_since_last_call(), + vec![Event::SessionRotated { starting_session: 5, active_era: 1, planned_era: 1 }] + ); + + Session::roll_to_next_session(); + assert_eq!( + staking_events_since_last_call(), + vec![Event::SessionRotated { starting_session: 6, active_era: 1, planned_era: 1 }] + ); + + Session::roll_to_next_session(); + assert_eq!( + staking_events_since_last_call(), + vec![Event::SessionRotated { starting_session: 7, active_era: 1, planned_era: 1 }] + ); + + Session::roll_to_next_session(); + assert_eq!( + staking_events_since_last_call(), + vec![Event::SessionRotated { starting_session: 8, active_era: 1, planned_era: 1 }] + ); + }); +} + +#[test] +fn forcing_no_forcing_default() { + ExtBuilder::default().build_and_execute(|| { + // default value, setting it again just for read-ability. + ForceEra::::put(Forcing::NotForcing); + + Session::roll_until_active_era(2); + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::SessionRotated { starting_session: 4, active_era: 1, planned_era: 2 }, + Event::PagedElectionProceeded { page: 0, result: Ok(2) }, + Event::SessionRotated { starting_session: 5, active_era: 1, planned_era: 2 }, + Event::EraPaid { era_index: 1, validator_payout: 7500, remainder: 7500 }, + Event::SessionRotated { starting_session: 6, active_era: 2, planned_era: 2 } + ] + ); + }); +} + +#[test] +fn forcing_force_always() { + ExtBuilder::default() + .session_per_era(6) + .no_flush_events() + .build_and_execute(|| { + // initial events thus far, without `ForceAlways` set. 
+ assert_eq!( + staking_events_since_last_call(), + vec![ + Event::SessionRotated { starting_session: 1, active_era: 0, planned_era: 0 }, + Event::SessionRotated { starting_session: 2, active_era: 0, planned_era: 0 }, + Event::SessionRotated { starting_session: 3, active_era: 0, planned_era: 0 }, + Event::SessionRotated { starting_session: 4, active_era: 0, planned_era: 1 }, + Event::PagedElectionProceeded { page: 0, result: Ok(2) }, + Event::SessionRotated { starting_session: 5, active_era: 0, planned_era: 1 }, + Event::EraPaid { era_index: 0, validator_payout: 15000, remainder: 15000 }, + Event::SessionRotated { starting_session: 6, active_era: 1, planned_era: 1 } + ] + ); + + // but with it set.. + ForceEra::::put(Forcing::ForceAlways); + + Session::roll_until_active_era(2); + assert_eq!( + staking_events_since_last_call(), + vec![ + // we immediately plan a new era as soon as the first session report comes in + Event::SessionRotated { starting_session: 7, active_era: 1, planned_era: 2 }, + Event::PagedElectionProceeded { page: 0, result: Ok(2) }, + // by now it is given to mock session, and is buffered + Event::SessionRotated { starting_session: 8, active_era: 1, planned_era: 2 }, + Event::EraPaid { era_index: 1, validator_payout: 7500, remainder: 7500 }, + // and by now it is activated. Note how the validator payout is less, since the + // era duration is less. Note that we immediately plan the next era as well. + Event::SessionRotated { starting_session: 9, active_era: 2, planned_era: 3 } + ] + ); + }); +} + +#[test] +fn forcing_force_new() { + ExtBuilder::default() + .session_per_era(6) + .no_flush_events() + .build_and_execute(|| { + // initial events thus far, without `ForceAlways` set. 
+ assert_eq!( + staking_events_since_last_call(), + vec![ + Event::SessionRotated { starting_session: 1, active_era: 0, planned_era: 0 }, + Event::SessionRotated { starting_session: 2, active_era: 0, planned_era: 0 }, + Event::SessionRotated { starting_session: 3, active_era: 0, planned_era: 0 }, + Event::SessionRotated { starting_session: 4, active_era: 0, planned_era: 1 }, + Event::PagedElectionProceeded { page: 0, result: Ok(2) }, + Event::SessionRotated { starting_session: 5, active_era: 0, planned_era: 1 }, + Event::EraPaid { era_index: 0, validator_payout: 15000, remainder: 15000 }, + Event::SessionRotated { starting_session: 6, active_era: 1, planned_era: 1 } + ] + ); + + // but with it set.. + ForceEra::::put(Forcing::ForceNew); + + // one era happens quicker + Session::roll_until_active_era(2); + assert_eq!( + staking_events_since_last_call(), + vec![ + // we immediately plan a new era as soon as the first session report comes in + Event::SessionRotated { starting_session: 7, active_era: 1, planned_era: 2 }, + Event::PagedElectionProceeded { page: 0, result: Ok(2) }, + // by now it is given to mock session, and is buffered + Event::SessionRotated { starting_session: 8, active_era: 1, planned_era: 2 }, + Event::EraPaid { era_index: 1, validator_payout: 7500, remainder: 7500 }, + // and by now it is activated. Note how the validator payout is less, since the + // era duration is less. + Event::SessionRotated { starting_session: 9, active_era: 2, planned_era: 2 } + ] + ); + + // And the next era goes back to normal. 
+ Session::roll_until_active_era(3); + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::SessionRotated { starting_session: 10, active_era: 2, planned_era: 2 }, + Event::SessionRotated { starting_session: 11, active_era: 2, planned_era: 2 }, + Event::SessionRotated { starting_session: 12, active_era: 2, planned_era: 2 }, + Event::SessionRotated { starting_session: 13, active_era: 2, planned_era: 3 }, + Event::PagedElectionProceeded { page: 0, result: Ok(2) }, + Event::SessionRotated { starting_session: 14, active_era: 2, planned_era: 3 }, + Event::EraPaid { era_index: 2, validator_payout: 15000, remainder: 15000 }, + Event::SessionRotated { starting_session: 15, active_era: 3, planned_era: 3 } + ] + ); + }); +} + +#[test] +#[should_panic] +fn activation_timestamp_when_no_planned_era() { + // maybe not needed, as we have the id check + todo!("what if we receive an activation timestamp when there is no planned era?"); +} + +#[test] +#[should_panic] +fn activation_timestamp_when_era_planning_not_complete() { + // maybe not needed, as we have the id check + todo!("what if we receive an activation timestamp when the era planning (election) is not complete?"); +} + +#[test] +#[should_panic] +fn max_era_duration_safety_guard() { + todo!("a safety guard that ensures that there is an upper bound on how long an era duration can be. 
Should prevent us from parabolic inflation in case of some crazy bug."); +} + +#[test] +fn era_cleanup_history_depth_works() { + // TODO: try-state for it + ExtBuilder::default().build_and_execute(|| { + // when we go forward to `HistoryDepth - 1` + assert_eq!(active_era(), 1); + + Session::roll_until_active_era(HistoryDepth::get() - 1); + assert!(matches!( + &staking_events_since_last_call()[..], + &[ + .., + Event::SessionRotated { starting_session: 236, active_era: 78, planned_era: 79 }, + Event::EraPaid { era_index: 78, validator_payout: 7500, remainder: 7500 }, + Event::SessionRotated { starting_session: 237, active_era: 79, planned_era: 79 } + ] + )); + assert_ok!(Eras::::era_present(1)); + assert_ok!(Eras::::era_present(2)); + // .. + assert_ok!(Eras::::era_present(HistoryDepth::get() - 1)); + + Session::roll_until_active_era(HistoryDepth::get()); + assert_ok!(Eras::::era_present(1)); + assert_ok!(Eras::::era_present(2)); + // .. + assert_ok!(Eras::::era_present(HistoryDepth::get())); + + // then first era info should have been deleted + Session::roll_until_active_era(HistoryDepth::get() + 1); + assert_ok!(Eras::::era_present(1)); + assert_ok!(Eras::::era_present(2)); + // .. + assert_ok!(Eras::::era_present(HistoryDepth::get() + 1)); + + Session::roll_until_active_era(HistoryDepth::get() + 2); + assert_ok!(Eras::::era_absent(1)); + assert_ok!(Eras::::era_present(2)); + assert_ok!(Eras::::era_present(3)); + // .. 
+ assert_ok!(Eras::::era_present(HistoryDepth::get() + 2)); + }); +} + +mod inflation { + use super::*; + + #[test] + fn max_staked_rewards_default_not_set_works() { + ExtBuilder::default().build_and_execute(|| { + let default_stakers_payout = validator_payout_for(time_per_era()); + assert!(default_stakers_payout > 0); + + assert_eq!(>::get(), None); + + Session::roll_until_active_era(2); + + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::SessionRotated { starting_session: 4, active_era: 1, planned_era: 2 }, + Event::PagedElectionProceeded { page: 0, result: Ok(2) }, + Event::SessionRotated { starting_session: 5, active_era: 1, planned_era: 2 }, + Event::EraPaid { era_index: 1, validator_payout: 7500, remainder: 7500 }, + Event::SessionRotated { starting_session: 6, active_era: 2, planned_era: 2 } + ] + ); + + // the final stakers reward is the same as the reward before applied the cap. + assert_eq!(ErasValidatorReward::::get(0).unwrap(), default_stakers_payout); + }) + } + + #[test] + fn max_staked_rewards_default_equal_100() { + ExtBuilder::default().build_and_execute(|| { + let default_stakers_payout = validator_payout_for(time_per_era()); + assert!(default_stakers_payout > 0); + >::set(Some(Percent::from_parts(100))); + + Session::roll_until_active_era(2); + + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::SessionRotated { starting_session: 4, active_era: 1, planned_era: 2 }, + Event::PagedElectionProceeded { page: 0, result: Ok(2) }, + Event::SessionRotated { starting_session: 5, active_era: 1, planned_era: 2 }, + Event::EraPaid { era_index: 1, validator_payout: 7500, remainder: 7500 }, + Event::SessionRotated { starting_session: 6, active_era: 2, planned_era: 2 } + ] + ); + + // the final stakers reward is the same as the reward before applied the cap. 
+ assert_eq!(ErasValidatorReward::::get(0).unwrap(), default_stakers_payout); + }); + } + + #[test] + fn max_staked_rewards_works() { + ExtBuilder::default().nominate(true).build_and_execute(|| { + // sets new max staked rewards through set_staking_configs. + assert_ok!(Staking::set_staking_configs( + RuntimeOrigin::root(), + ConfigOp::Noop, + ConfigOp::Noop, + ConfigOp::Noop, + ConfigOp::Noop, + ConfigOp::Noop, + ConfigOp::Noop, + ConfigOp::Set(Percent::from_percent(10)), + )); + + assert_eq!(>::get(), Some(Percent::from_percent(10))); + + // check validators account state. + assert_eq!(Session::validators().len(), 2); + assert!(Session::validators().contains(&11) & Session::validators().contains(&21)); + + // balance of the mock treasury account is 0 + assert_eq!(RewardRemainderUnbalanced::get(), 0); + + Session::roll_until_active_era(2); + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::SessionRotated { starting_session: 4, active_era: 1, planned_era: 2 }, + Event::PagedElectionProceeded { page: 0, result: Ok(2) }, + Event::SessionRotated { starting_session: 5, active_era: 1, planned_era: 2 }, + Event::EraPaid { era_index: 1, validator_payout: 1500, remainder: 13500 }, + Event::SessionRotated { starting_session: 6, active_era: 2, planned_era: 2 } + ] + ); + + let treasury_payout = RewardRemainderUnbalanced::get(); + let validators_payout = ErasValidatorReward::::get(1).unwrap(); + let total_payout = treasury_payout + validators_payout; + + // total payout is the same + assert_eq!(total_payout, total_payout_for(time_per_era())); + // validators get only 10% + assert_eq!(validators_payout, Percent::from_percent(10) * total_payout); + // treasury gets 90% + assert_eq!(treasury_payout, Percent::from_percent(90) * total_payout); + }) + } +} diff --git a/substrate/frame/staking-async/src/tests/force_unstake_kill_stash.rs b/substrate/frame/staking-async/src/tests/force_unstake_kill_stash.rs new file mode 100644 index 0000000000000..39c9e469b919c --- 
/dev/null
+++ b/substrate/frame/staking-async/src/tests/force_unstake_kill_stash.rs
@@ -0,0 +1,62 @@
// This file is part of Substrate.

// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use super::*;

#[test]
fn force_unstake_works() {
	// `force_unstake` is a root-only call that removes all staking state of a stash,
	// releasing its funds for transfer.
	ExtBuilder::default().build_and_execute(|| {
		assert_eq!(Staking::bonded(&11), Some(11));

		// Is bonded -- cannot transfer
		assert_noop!(
			Balances::transfer_allow_death(RuntimeOrigin::signed(11), 1, 10),
			TokenError::FundsUnavailable,
		);

		// Force unstake requires root.
		assert_noop!(Staking::force_unstake(RuntimeOrigin::signed(11), 11, 0), BadOrigin);

		// slashing span doesn't matter, can be any value.
		hypothetically!({
			assert_ok!(Staking::force_unstake(RuntimeOrigin::root(), 11, 42));
		});

		assert_ok!(Staking::force_unstake(RuntimeOrigin::root(), 11, 0));

		// No longer bonded, can transfer out
		assert_eq!(Staking::bonded(&11), None);
		assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(11), 1, 10));
	});
}

#[test]
fn kill_stash_works() {
	// `kill_stash` removes the ledger/bond of an existing stash; fails for unknown stashes.
	ExtBuilder::default().build_and_execute(|| {
		assert_eq!(Staking::bonded(&11), Some(11));

		assert_noop!(Staking::kill_stash(&12, 0), Error::<Test>::NotStash);

		// slashing spans don't matter, can be any value
		hypothetically!({
			assert_ok!(Staking::kill_stash(&11, 42));
		});

		assert_ok!(Staking::kill_stash(&11, 2));
		assert_eq!(Staking::bonded(&11), None);
	});
}
diff --git a/substrate/frame/staking-async/src/tests/ledger.rs b/substrate/frame/staking-async/src/tests/ledger.rs
new file mode 100644
index 0000000000000..f0b62e5d1f71d
--- /dev/null
+++ b/substrate/frame/staking-async/src/tests/ledger.rs
@@ -0,0 +1,856 @@
// This file is part of Substrate.

// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+ +use super::*; + +#[test] +fn paired_account_works() { + ExtBuilder::default().build_and_execute(|| { + assert_ok!(Staking::bond(RuntimeOrigin::signed(10), 100, RewardDestination::Account(10))); + + assert_eq!(>::get(&10), Some(10)); + assert_eq!(StakingLedger::::paired_account(StakingAccount::Controller(10)), Some(10)); + assert_eq!(StakingLedger::::paired_account(StakingAccount::Stash(10)), Some(10)); + + assert_eq!(>::get(&42), None); + assert_eq!(StakingLedger::::paired_account(StakingAccount::Controller(42)), None); + assert_eq!(StakingLedger::::paired_account(StakingAccount::Stash(42)), None); + + // bond manually stash with different controller. This is deprecated but the migration + // has not been complete yet (controller: 100, stash: 200) + assert_ok!(bond_controller_stash(100, 200)); + assert_eq!(>::get(&200), Some(100)); + assert_eq!( + StakingLedger::::paired_account(StakingAccount::Controller(100)), + Some(200) + ); + assert_eq!(StakingLedger::::paired_account(StakingAccount::Stash(200)), Some(100)); + }) +} + +#[test] +fn get_ledger_works() { + ExtBuilder::default().build_and_execute(|| { + // stash does not exist + assert!(StakingLedger::::get(StakingAccount::Stash(42)).is_err()); + + // bonded and paired + assert_eq!(>::get(&11), Some(11)); + + match StakingLedger::::get(StakingAccount::Stash(11)) { + Ok(ledger) => { + assert_eq!(ledger.controller(), Some(11)); + assert_eq!(ledger.stash, 11); + }, + Err(_) => panic!("staking ledger must exist"), + }; + + // bond manually stash with different controller. 
This is deprecated but the migration + // has not been complete yet (controller: 100, stash: 200) + assert_ok!(bond_controller_stash(100, 200)); + assert_eq!(>::get(&200), Some(100)); + + match StakingLedger::::get(StakingAccount::Stash(200)) { + Ok(ledger) => { + assert_eq!(ledger.controller(), Some(100)); + assert_eq!(ledger.stash, 200); + }, + Err(_) => panic!("staking ledger must exist"), + }; + + match StakingLedger::::get(StakingAccount::Controller(100)) { + Ok(ledger) => { + assert_eq!(ledger.controller(), Some(100)); + assert_eq!(ledger.stash, 200); + }, + Err(_) => panic!("staking ledger must exist"), + }; + }) +} + +#[test] +fn get_ledger_bad_state_fails() { + ExtBuilder::default().has_stakers(false).try_state(false).build_and_execute(|| { + setup_double_bonded_ledgers(); + + // Case 1: double bonded but not corrupted: + // stash 444 has controller 555: + assert_eq!(Bonded::::get(444), Some(555)); + assert_eq!(Ledger::::get(555).unwrap().stash, 444); + + // stash 444 is also a controller of 333: + assert_eq!(Bonded::::get(333), Some(444)); + assert_eq!(StakingLedger::::paired_account(StakingAccount::Stash(333)), Some(444)); + assert_eq!(Ledger::::get(444).unwrap().stash, 333); + + // although 444 is double bonded (it is a controller and a stash of different ledgers), + // we can safely retrieve the ledger and mutate it since the correct ledger is + // returned. + let ledger_result = StakingLedger::::get(StakingAccount::Stash(444)); + assert_eq!(ledger_result.unwrap().stash, 444); // correct ledger. + + let ledger_result = StakingLedger::::get(StakingAccount::Controller(444)); + assert_eq!(ledger_result.unwrap().stash, 333); // correct ledger. + + // fetching ledger 333 by its stash works. + let ledger_result = StakingLedger::::get(StakingAccount::Stash(333)); + assert_eq!(ledger_result.unwrap().stash, 333); + + // Case 2: corrupted ledger bonding. 
+ // in this case, we simulate what happens when fetching a ledger by stash returns a + // ledger with a different stash. when this happens, we return an error instead of the + // ledger to prevent ledger mutations. + let mut ledger = Ledger::::get(444).unwrap(); + assert_eq!(ledger.stash, 333); + ledger.stash = 444; + Ledger::::insert(444, ledger); + + // now, we are prevented from fetching the ledger by stash from 1. It's associated + // controller (2) is now bonding a ledger with a different stash (2, not 1). + assert!(StakingLedger::::get(StakingAccount::Stash(333)).is_err()); + }) +} + +#[test] +fn bond_works() { + ExtBuilder::default().build_and_execute(|| { + asset::set_stakeable_balance::(&42, 1000); + assert!(!StakingLedger::::is_bonded(StakingAccount::Stash(42))); + assert!(>::get(&42).is_none()); + + let mut ledger: StakingLedger = StakingLedger::new(42, 84); + let reward_dest = RewardDestination::Account(10); + + assert_ok!(ledger.clone().bond(reward_dest)); + assert!(StakingLedger::::is_bonded(StakingAccount::Stash(42))); + assert!(>::get(&42).is_some()); + assert_eq!(>::get(&42), Some(reward_dest)); + + // cannot bond again. + assert!(ledger.clone().bond(reward_dest).is_err()); + + // once bonded, unbonding (or any other update) works as expected. + ledger.unlocking = bounded_vec![UnlockChunk { era: 42, value: 42 }]; + ledger.active -= 42; + assert_ok!(ledger.update()); + }) +} + +#[test] +fn bond_controller_cannot_be_stash_works() { + ExtBuilder::default().build_and_execute(|| { + let (stash, controller) = testing_utils::create_unique_stash_controller::( + 0, + 10, + RewardDestination::Staked, + false, + ) + .unwrap(); + + assert_eq!(Bonded::::get(stash), Some(controller)); + assert_eq!(Ledger::::get(controller).map(|l| l.stash), Some(stash)); + + // existing controller should not be able become a stash. 
+ assert_noop!( + Staking::bond(RuntimeOrigin::signed(controller), 10, RewardDestination::Staked), + Error::::AlreadyPaired, + ); + }) +} + +#[test] +fn is_bonded_works() { + ExtBuilder::default().build_and_execute(|| { + assert!(!StakingLedger::::is_bonded(StakingAccount::Stash(42))); + assert!(!StakingLedger::::is_bonded(StakingAccount::Controller(42))); + + // adds entry to Bonded without Ledger pair (should not happen). + >::insert(42, 42); + assert!(!StakingLedger::::is_bonded(StakingAccount::Controller(42))); + + assert_eq!(>::get(&11), Some(11)); + assert!(StakingLedger::::is_bonded(StakingAccount::Stash(11))); + assert!(StakingLedger::::is_bonded(StakingAccount::Controller(11))); + + >::remove(42); // ensures try-state checks pass. + }) +} + +#[test] +#[allow(deprecated)] +fn set_payee_errors_on_controller_destination() { + ExtBuilder::default().build_and_execute(|| { + Payee::::insert(11, RewardDestination::Staked); + assert_noop!( + Staking::set_payee(RuntimeOrigin::signed(11), RewardDestination::Controller), + Error::::ControllerDeprecated + ); + assert_eq!(Payee::::get(&11), Some(RewardDestination::Staked)); + }) +} + +#[test] +#[allow(deprecated)] +fn update_payee_migration_works() { + ExtBuilder::default().build_and_execute(|| { + // migrate a `Controller` variant to `Account` variant. + Payee::::insert(11, RewardDestination::Controller); + assert_eq!(Payee::::get(&11), Some(RewardDestination::Controller)); + assert_ok!(Staking::update_payee(RuntimeOrigin::signed(11), 11)); + assert_eq!(Payee::::get(&11), Some(RewardDestination::Account(11))); + + // Do not migrate a variant if not `Controller`. 
+ Payee::::insert(21, RewardDestination::Stash); + assert_eq!(Payee::::get(&21), Some(RewardDestination::Stash)); + assert_noop!( + Staking::update_payee(RuntimeOrigin::signed(11), 21), + Error::::NotController + ); + assert_eq!(Payee::::get(&21), Some(RewardDestination::Stash)); + }) +} + +#[test] +fn set_controller_with_bad_state_ok() { + ExtBuilder::default().has_stakers(false).nominate(false).build_and_execute(|| { + setup_double_bonded_ledgers(); + + // in this case, setting controller works due to the ordering of the calls. + assert_ok!(Staking::set_controller(RuntimeOrigin::signed(333))); + assert_ok!(Staking::set_controller(RuntimeOrigin::signed(444))); + assert_ok!(Staking::set_controller(RuntimeOrigin::signed(555))); + }) +} + +#[test] +fn set_controller_with_bad_state_fails() { + ExtBuilder::default().has_stakers(false).try_state(false).build_and_execute(|| { + setup_double_bonded_ledgers(); + + // setting the controller of ledger associated with stash 555 fails since its stash is a + // controller of another ledger. 
+ assert_noop!(Staking::set_controller(RuntimeOrigin::signed(555)), Error::::BadState); + assert_noop!(Staking::set_controller(RuntimeOrigin::signed(444)), Error::::BadState); + assert_ok!(Staking::set_controller(RuntimeOrigin::signed(333))); + }) +} + +mod deprecate_controller_call { + use super::*; + + #[test] + fn deprecate_controller_batch_works_full_weight() { + ExtBuilder::default().try_state(false).build_and_execute(|| { + // Given: + + let start = 1001; + let mut controllers: Vec<_> = vec![]; + for n in start..(start + MaxControllersInDeprecationBatch::get()).into() { + let ctlr: u64 = n.into(); + let stash: u64 = (n + 10000).into(); + + Ledger::::insert( + ctlr, + StakingLedger { + controller: None, + total: (10 + ctlr).into(), + active: (10 + ctlr).into(), + ..StakingLedger::default_from(stash) + }, + ); + Bonded::::insert(stash, ctlr); + Payee::::insert(stash, RewardDestination::Staked); + + controllers.push(ctlr); + } + + // When: + + let bounded_controllers: BoundedVec< + _, + ::MaxControllersInDeprecationBatch, + > = BoundedVec::try_from(controllers).unwrap(); + + // Only `AdminOrigin` can sign. + assert_noop!( + Staking::deprecate_controller_batch( + RuntimeOrigin::signed(2), + bounded_controllers.clone() + ), + BadOrigin + ); + + let result = + Staking::deprecate_controller_batch(RuntimeOrigin::root(), bounded_controllers); + assert_ok!(result); + assert_eq!( + result.unwrap().actual_weight.unwrap(), + ::WeightInfo::deprecate_controller_batch( + ::MaxControllersInDeprecationBatch::get() + ) + ); + + // Then: + + for n in start..(start + MaxControllersInDeprecationBatch::get()).into() { + let ctlr: u64 = n.into(); + let stash: u64 = (n + 10000).into(); + + // Ledger no longer keyed by controller. + assert_eq!(Ledger::::get(ctlr), None); + // Bonded now maps to the stash. + assert_eq!(Bonded::::get(stash), Some(stash)); + + // Ledger is now keyed by stash. 
+ let ledger_updated = Ledger::::get(stash).unwrap(); + assert_eq!(ledger_updated.stash, stash); + + // Check `active` and `total` values match the original ledger set by controller. + assert_eq!(ledger_updated.active, (10 + ctlr).into()); + assert_eq!(ledger_updated.total, (10 + ctlr).into()); + } + }) + } + + #[test] + fn deprecate_controller_batch_works_half_weight() { + ExtBuilder::default().build_and_execute(|| { + // Given: + + let start = 1001; + let mut controllers: Vec<_> = vec![]; + for n in start..(start + MaxControllersInDeprecationBatch::get()).into() { + let ctlr: u64 = n.into(); + + // Only half of entries are unique pairs. + let stash: u64 = if n % 2 == 0 { (n + 10000).into() } else { ctlr }; + + Ledger::::insert( + ctlr, + StakingLedger { controller: None, ..StakingLedger::default_from(stash) }, + ); + Bonded::::insert(stash, ctlr); + Payee::::insert(stash, RewardDestination::Staked); + + controllers.push(ctlr); + } + + // When: + let bounded_controllers: BoundedVec< + _, + ::MaxControllersInDeprecationBatch, + > = BoundedVec::try_from(controllers.clone()).unwrap(); + + let result = + Staking::deprecate_controller_batch(RuntimeOrigin::root(), bounded_controllers); + assert_ok!(result); + assert_eq!( + result.unwrap().actual_weight.unwrap(), + ::WeightInfo::deprecate_controller_batch(controllers.len() as u32) + ); + + // Then: + + for n in start..(start + MaxControllersInDeprecationBatch::get()).into() { + let unique_pair = n % 2 == 0; + let ctlr: u64 = n.into(); + let stash: u64 = if unique_pair { (n + 10000).into() } else { ctlr }; + + // Side effect of migration for unique pair. + if unique_pair { + assert_eq!(Ledger::::get(ctlr), None); + } + // Bonded maps to the stash. + assert_eq!(Bonded::::get(stash), Some(stash)); + + // Ledger is keyed by stash. 
+ let ledger_updated = Ledger::::get(stash).unwrap(); + assert_eq!(ledger_updated.stash, stash); + } + }) + } + + #[test] + fn deprecate_controller_batch_skips_unmigrated_controller_payees() { + ExtBuilder::default().try_state(false).build_and_execute(|| { + // Given: + + let stash: u64 = 1000; + let ctlr: u64 = 1001; + + Ledger::::insert( + ctlr, + StakingLedger { controller: None, ..StakingLedger::default_from(stash) }, + ); + Bonded::::insert(stash, ctlr); + #[allow(deprecated)] + Payee::::insert(stash, RewardDestination::Controller); + + // When: + + let bounded_controllers: BoundedVec< + _, + ::MaxControllersInDeprecationBatch, + > = BoundedVec::try_from(vec![ctlr]).unwrap(); + + let result = + Staking::deprecate_controller_batch(RuntimeOrigin::root(), bounded_controllers); + assert_ok!(result); + assert_eq!( + result.unwrap().actual_weight.unwrap(), + ::WeightInfo::deprecate_controller_batch(1 as u32) + ); + + // Then: + + // Esure deprecation did not happen. + assert_eq!(Ledger::::get(ctlr).is_some(), true); + + // Bonded still keyed by controller. + assert_eq!(Bonded::::get(stash), Some(ctlr)); + + // Ledger is still keyed by controller. + let ledger_updated = Ledger::::get(ctlr).unwrap(); + assert_eq!(ledger_updated.stash, stash); + }) + } + + #[test] + fn deprecate_controller_batch_with_bad_state_ok() { + ExtBuilder::default().has_stakers(false).nominate(false).build_and_execute(|| { + setup_double_bonded_ledgers(); + + // now let's deprecate all the controllers for all the existing ledgers. 
+ let bounded_controllers: BoundedVec< + _, + ::MaxControllersInDeprecationBatch, + > = BoundedVec::try_from(vec![333, 444, 555, 777]).unwrap(); + + assert_ok!(Staking::deprecate_controller_batch( + RuntimeOrigin::root(), + bounded_controllers + )); + + assert_eq!( + *staking_events().last().unwrap(), + Event::ControllerBatchDeprecated { failures: 0 } + ); + }) + } + + #[test] + fn deprecate_controller_batch_with_bad_state_failures() { + ExtBuilder::default().has_stakers(false).try_state(false).build_and_execute(|| { + setup_double_bonded_ledgers(); + + // now let's deprecate all the controllers for all the existing ledgers. + let bounded_controllers: BoundedVec< + _, + ::MaxControllersInDeprecationBatch, + > = BoundedVec::try_from(vec![777, 555, 444, 333]).unwrap(); + + assert_ok!(Staking::deprecate_controller_batch( + RuntimeOrigin::root(), + bounded_controllers + )); + + assert_eq!( + *staking_events().last().unwrap(), + Event::ControllerBatchDeprecated { failures: 2 } + ); + }) + } +} + +mod ledger_recovery { + use super::*; + + #[test] + fn inspect_recovery_ledger_simple_works() { + ExtBuilder::default().has_stakers(true).try_state(false).build_and_execute(|| { + setup_double_bonded_ledgers(); + + // non corrupted ledger. + assert_eq!(Staking::inspect_bond_state(&11).unwrap(), LedgerIntegrityState::Ok); + + // non bonded stash. + assert!(Bonded::::get(&1111).is_none()); + assert!(Staking::inspect_bond_state(&1111).is_err()); + + // double bonded but not corrupted. 
+ assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Ok); + }) + } + + #[test] + fn inspect_recovery_ledger_corupted_killed_works() { + ExtBuilder::default().has_stakers(true).try_state(false).build_and_execute(|| { + setup_double_bonded_ledgers(); + + let lock_333_before = asset::staked::(&333); + + // get into corrupted and killed ledger state by killing a corrupted ledger: + // init state: + // (333, 444) + // (444, 555) + // set_controller(444) to 444 + // (333, 444) -> corrupted + // (444, 444) + // kill(333) + // (444, 444) -> corrupted and None. + assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Ok); + set_controller_no_checks(&444); + + // now try-state fails. + assert!(Staking::do_try_state(System::block_number()).is_err()); + + // 333 is corrupted since it's controller is linking 444 ledger. + assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Corrupted); + // 444 however is OK. + assert_eq!(Staking::inspect_bond_state(&444).unwrap(), LedgerIntegrityState::Ok); + + // kill the corrupted ledger that is associated with stash 333. + assert_ok!(StakingLedger::::kill(&333)); + + // 333 bond is no more but it returns `BadState` because the lock on this stash is + // still set (see checks below). + assert_eq!(Staking::inspect_bond_state(&333), Err(Error::::BadState)); + // now the *other* ledger associated with 444 has been corrupted and killed (None). + assert_eq!( + Staking::inspect_bond_state(&444), + Ok(LedgerIntegrityState::CorruptedKilled) + ); + + // side effects on 333 - ledger, bonded, payee, lock should be completely empty. + // however, 333 lock remains. + assert_eq!(asset::staked::(&333), lock_333_before); // NOK + assert!(Bonded::::get(&333).is_none()); // OK + assert!(Payee::::get(&333).is_none()); // OK + assert!(Ledger::::get(&444).is_none()); // OK + + // side effects on 444 - ledger, bonded, payee, lock should remain be intact. + // however, 444 lock was removed. 
+ assert_eq!(asset::staked::(&444), 0); // NOK + assert!(Bonded::::get(&444).is_some()); // OK + assert!(Payee::::get(&444).is_some()); // OK + assert!(Ledger::::get(&555).is_none()); // NOK + + assert!(Staking::do_try_state(System::block_number()).is_err()); + }) + } + + #[test] + fn inspect_recovery_ledger_corupted_killed_other_works() { + ExtBuilder::default().has_stakers(true).try_state(false).build_and_execute(|| { + setup_double_bonded_ledgers(); + + let lock_333_before = asset::staked::(&333); + + // get into corrupted and killed ledger state by killing a corrupted ledger: + // init state: + // (333, 444) + // (444, 555) + // set_controller(444) to 444 + // (333, 444) -> corrupted + // (444, 444) + // kill(444) + // (333, 444) -> corrupted and None + assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Ok); + set_controller_no_checks(&444); + + // now try-state fails. + assert!(Staking::do_try_state(System::block_number()).is_err()); + + // 333 is corrupted since it's controller is linking 444 ledger. + assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Corrupted); + // 444 however is OK. + assert_eq!(Staking::inspect_bond_state(&444).unwrap(), LedgerIntegrityState::Ok); + + // kill the *other* ledger that is double bonded but not corrupted. + assert_ok!(StakingLedger::::kill(&444)); + + // now 333 is corrupted and None through the *other* ledger being killed. + assert_eq!( + Staking::inspect_bond_state(&333).unwrap(), + LedgerIntegrityState::CorruptedKilled, + ); + // 444 is cleaned and not a stash anymore; no lock left behind. + assert_eq!(Ledger::::get(&444), None); + assert_eq!(Staking::inspect_bond_state(&444), Err(Error::::NotStash)); + + // side effects on 333 - ledger, bonded, payee, lock should be intact. 
+ assert_eq!(asset::staked::(&333), lock_333_before); // OK + assert_eq!(Bonded::::get(&333), Some(444)); // OK + assert!(Payee::::get(&333).is_some()); + // however, ledger associated with its controller was killed. + assert!(Ledger::::get(&444).is_none()); // NOK + + // side effects on 444 - ledger, bonded, payee, lock should be completely removed. + assert_eq!(asset::staked::(&444), 0); // OK + assert!(Bonded::::get(&444).is_none()); // OK + assert!(Payee::::get(&444).is_none()); // OK + assert!(Ledger::::get(&555).is_none()); // OK + + assert!(Staking::do_try_state(System::block_number()).is_err()); + }) + } + + #[test] + fn inspect_recovery_ledger_lock_corrupted_works() { + ExtBuilder::default().has_stakers(true).try_state(false).build_and_execute(|| { + setup_double_bonded_ledgers(); + + // get into lock corrupted ledger state by bond_extra on a ledger that is double bonded + // with a corrupted ledger. + // init state: + // (333, 444) + // (444, 555) + // set_controller(444) to 444 + // (333, 444) -> corrupted + // (444, 444) + // bond_extra(333, 10) -> lock corrupted on 444 + assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Ok); + set_controller_no_checks(&444); + bond_extra_no_checks(&333, 10); + + // now try-state fails. + assert!(Staking::do_try_state(System::block_number()).is_err()); + + // 333 is corrupted since it's controller is linking 444 ledger. + assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Corrupted); + // 444 ledger is not corrupted but locks got out of sync. + assert_eq!( + Staking::inspect_bond_state(&444).unwrap(), + LedgerIntegrityState::LockCorrupted + ); + }) + } + + // Corrupted ledger restore. + // + // * Double bonded and corrupted ledger. + #[test] + fn restore_ledger_corrupted_works() { + ExtBuilder::default().has_stakers(true).build_and_execute(|| { + setup_double_bonded_ledgers(); + + // get into corrupted and killed ledger state. 
+ // init state: + // (333, 444) + // (444, 555) + // set_controller(444) to 444 + // (333, 444) -> corrupted + // (444, 444) + assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Ok); + set_controller_no_checks(&444); + + assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Corrupted); + + // now try-state fails. + assert!(Staking::do_try_state(System::block_number()).is_err()); + + // recover the ledger bonded by 333 stash. + assert_ok!(Staking::restore_ledger(RuntimeOrigin::root(), 333, None, None, None)); + + // try-state checks are ok now. + assert_ok!(Staking::do_try_state(System::block_number())); + }) + } + + // Corrupted and killed ledger restore. + // + // * Double bonded and corrupted ledger. + // * Ledger killed by own controller. + #[test] + fn restore_ledger_corrupted_killed_works() { + ExtBuilder::default().has_stakers(true).build_and_execute(|| { + setup_double_bonded_ledgers(); + + // ledger.total == lock + let total_444_before_corruption = asset::staked::(&444); + + // get into corrupted and killed ledger state by killing a corrupted ledger: + // init state: + // (333, 444) + // (444, 555) + // set_controller(444) to 444 + // (333, 444) -> corrupted + // (444, 444) + // kill(333) + // (444, 444) -> corrupted and None. + assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Ok); + set_controller_no_checks(&444); + + // kill the corrupted ledger that is associated with stash 333. + assert_ok!(StakingLedger::::kill(&333)); + + // 333 bond is no more but it returns `BadState` because the lock on this stash is + // still set (see checks below). + assert_eq!(Staking::inspect_bond_state(&333), Err(Error::::BadState)); + // now the *other* ledger associated with 444 has been corrupted and killed (None). + assert!(Staking::ledger(StakingAccount::Stash(444)).is_err()); + + // try-state should fail. 
+ assert!(Staking::do_try_state(System::block_number()).is_err()); + + // recover the ledger bonded by 333 stash. + assert_ok!(Staking::restore_ledger(RuntimeOrigin::root(), 333, None, None, None)); + + // for the try-state checks to pass, we also need to recover the stash 444 which is + // corrupted too by proxy of kill(333). Currently, both the lock and the ledger of 444 + // have been cleared so we need to provide the new amount to restore the ledger. + assert_noop!( + Staking::restore_ledger(RuntimeOrigin::root(), 444, None, None, None), + Error::::CannotRestoreLedger + ); + + assert_ok!(Staking::restore_ledger( + RuntimeOrigin::root(), + 444, + None, + Some(total_444_before_corruption), + None, + )); + + // try-state checks are ok now. + assert_ok!(Staking::do_try_state(System::block_number())); + }) + } + + // Corrupted and killed by *other* ledger restore. + // + // * Double bonded and corrupted ledger. + // * Ledger killed by own controller. + #[test] + fn restore_ledger_corrupted_killed_other_works() { + ExtBuilder::default().has_stakers(true).build_and_execute(|| { + setup_double_bonded_ledgers(); + + // get into corrupted and killed ledger state by killing a corrupted ledger: + // init state: + // (333, 444) + // (444, 555) + // set_controller(444) to 444 + // (333, 444) -> corrupted + // (444, 444) + // kill(444) + // (333, 444) -> corrupted and None + assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Ok); + set_controller_no_checks(&444); + + // now try-state fails. + assert!(Staking::do_try_state(System::block_number()).is_err()); + + // 333 is corrupted since it's controller is linking 444 ledger. + assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Corrupted); + // 444 however is OK. + assert_eq!(Staking::inspect_bond_state(&444).unwrap(), LedgerIntegrityState::Ok); + + // kill the *other* ledger that is double bonded but not corrupted. 
+ assert_ok!(StakingLedger::::kill(&444)); + + // recover the ledger bonded by 333 stash. + assert_ok!(Staking::restore_ledger(RuntimeOrigin::root(), 333, None, None, None)); + + // 444 does not need recover in this case since it's been killed successfully. + assert_eq!(Staking::inspect_bond_state(&444), Err(Error::::NotStash)); + + // try-state checks are ok now. + assert_ok!(Staking::do_try_state(System::block_number())); + }) + } + + // Corrupted with bond_extra. + // + // * Double bonded and corrupted ledger. + // * Corrupted ledger calls `bond_extra` + #[test] + fn restore_ledger_corrupted_bond_extra_works() { + ExtBuilder::default().has_stakers(true).build_and_execute(|| { + setup_double_bonded_ledgers(); + + let lock_333_before = asset::staked::(&333); + let lock_444_before = asset::staked::(&444); + + // get into corrupted and killed ledger state by killing a corrupted ledger: + // init state: + // (333, 444) + // (444, 555) + // set_controller(444) to 444 + // (333, 444) -> corrupted + // (444, 444) + // bond_extra(444, 40) -> OK + // bond_extra(333, 30) -> locks out of sync + + assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Ok); + set_controller_no_checks(&444); + + // now try-state fails. + assert!(Staking::do_try_state(System::block_number()).is_err()); + + // if 444 bonds extra, the locks remain in sync. + bond_extra_no_checks(&444, 40); + assert_eq!(asset::staked::(&333), lock_333_before); + assert_eq!(asset::staked::(&444), lock_444_before + 40); + + // however if 333 bonds extra, the wrong lock is updated. + bond_extra_no_checks(&333, 30); + assert_eq!(asset::staked::(&333), lock_444_before + 40 + 30); //not OK + assert_eq!(asset::staked::(&444), lock_444_before + 40); // OK + + // recover the ledger bonded by 333 stash. Note that the total/lock needs to be + // re-written since on-chain data lock has become out of sync. 
+ assert_ok!(Staking::restore_ledger( + RuntimeOrigin::root(), + 333, + None, + Some(lock_333_before + 30), + None + )); + + // now recover 444 that although it's not corrupted, its lock and ledger.total are out + // of sync. in which case, we need to explicitly set the ledger's lock and amount, + // otherwise the ledger recover will fail. + assert_noop!( + Staking::restore_ledger(RuntimeOrigin::root(), 444, None, None, None), + Error::::CannotRestoreLedger + ); + + //and enforcing a new ledger lock/total on this non-corrupted ledger will work. + assert_ok!(Staking::restore_ledger( + RuntimeOrigin::root(), + 444, + None, + Some(lock_444_before + 40), + None + )); + + // double-check that ledgers got to expected state and bond_extra done during the + // corrupted state is part of the recovered ledgers. + let ledger_333 = Bonded::::get(&333).and_then(Ledger::::get).unwrap(); + let ledger_444 = Bonded::::get(&444).and_then(Ledger::::get).unwrap(); + + assert_eq!(ledger_333.total, lock_333_before + 30); + assert_eq!(asset::staked::(&333), ledger_333.total); + assert_eq!(ledger_444.total, lock_444_before + 40); + assert_eq!(asset::staked::(&444), ledger_444.total); + + // try-state checks are ok now. + assert_ok!(Staking::do_try_state(System::block_number())); + }) + } +} diff --git a/substrate/frame/staking-async/src/tests/mod.rs b/substrate/frame/staking-async/src/tests/mod.rs new file mode 100644 index 0000000000000..865a2d3d6e45c --- /dev/null +++ b/substrate/frame/staking-async/src/tests/mod.rs @@ -0,0 +1,1280 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tests for the module. + +use super::*; +use crate::{asset, ledger::StakingLedgerInspect, mock::Session}; +use frame_election_provider_support::{ + bounds::{DataProviderBounds, ElectionBoundsBuilder}, + SortedListProvider, +}; +use frame_support::{ + assert_noop, assert_ok, assert_storage_noop, hypothetically, + pallet_prelude::*, + traits::{InspectLockableCurrency, ReservableCurrency}, +}; +use mock::*; +use sp_runtime::{ + assert_eq_error_rate, bounded_vec, traits::BadOrigin, Perbill, Percent, TokenError, +}; +use sp_staking::{Stake, StakingAccount, StakingInterface}; +use substrate_test_utils::assert_eq_uvec; + +mod bonding; +mod configs; +mod controller; +mod election_data_provider; +mod election_provider; +mod era_rotation; +mod force_unstake_kill_stash; +mod ledger; +mod payout_stakers; +mod slashing; + +#[test] +fn basic_setup_session_queuing_should_work() { + ExtBuilder::default().nominate(false).build_and_execute(|| { + assert_eq!(Session::current_index(), 3); + + // put some money in account that we'll use. + for i in 1..5 { + let _ = asset::set_stakeable_balance::(&i, 2000); + } + + // add a new candidate for being a validator. account 3. + assert_ok!(Staking::bond(RuntimeOrigin::signed(3), 1500, RewardDestination::Account(3))); + assert_ok!(Staking::validate(RuntimeOrigin::signed(3), ValidatorPrefs::default())); + + // No effects will be seen so far. 
+ assert_eq_uvec!(Session::validators(), vec![21, 11]); + + Session::roll_until_session(4); + assert_eq_uvec!(Session::validators(), vec![21, 11]); + assert_eq!(Session::queued_validators(), None); + + Session::roll_until_session(5); + assert_eq_uvec!(Session::validators(), vec![21, 11]); + assert_eq_uvec!(Session::queued_validators().unwrap(), vec![21, 3]); + + Session::roll_until_session(6); + assert_eq_uvec!(Session::validators(), vec![21, 3]); + assert_eq!(Session::queued_validators(), None); + + // then chill 3 + Staking::chill(RuntimeOrigin::signed(3)).unwrap(); + + // nothing. 3 is still there. + Session::roll_until_session(7); + assert_eq_uvec!(Session::validators(), vec![21, 3]); + + Session::roll_until_session(8); + assert_eq_uvec!(Session::validators(), vec![21, 3]); + + // now are back -- 3 is gone + Session::roll_until_session(9); + assert_eq_uvec!(Session::validators(), vec![21, 11]); + + // 3 is still bonded though + assert_eq!( + Staking::ledger(3.into()).unwrap(), + StakingLedgerInspect { + stash: 3, + total: 1500, + active: 1500, + unlocking: Default::default(), + } + ); + + // e.g. 
it cannot reserve more than 500 that it has free from the total 2000 + assert_noop!(Balances::reserve(&3, 501), DispatchError::ConsumerRemaining); + assert_ok!(Balances::reserve(&3, 409)); + }); +} + +#[test] +fn basic_setup_works() { + // Verifies initial conditions of mock + ExtBuilder::default().build_and_execute(|| { + // Account 11 is stashed and locked, and is the controller + assert_eq!(Staking::bonded(&11), Some(11)); + // Account 21 is stashed and locked and is the controller + assert_eq!(Staking::bonded(&21), Some(21)); + // Account 1 is not a stashed + assert_eq!(Staking::bonded(&1), None); + + // Account 11 controls its own stash, which is 100 * balance_factor units + assert_eq!( + Ledger::get(&11).unwrap(), + StakingLedgerInspect:: { + stash: 11, + total: 1000, + active: 1000, + unlocking: Default::default(), + } + ); + // Account 21 controls its own stash, which is 200 * balance_factor units + assert_eq!( + Ledger::get(&21).unwrap(), + StakingLedgerInspect:: { + stash: 21, + total: 1000, + active: 1000, + unlocking: Default::default(), + } + ); + // Account 1 does not control any stash + assert!(Staking::ledger(1.into()).is_err()); + + // ValidatorPrefs are default + assert_eq_uvec!( + >::iter().collect::>(), + vec![ + (31, ValidatorPrefs::default()), + (21, ValidatorPrefs::default()), + (11, ValidatorPrefs::default()) + ] + ); + + // check the single nominators we have + assert_eq!( + Staking::ledger(101.into()).unwrap(), + StakingLedgerInspect { + stash: 101, + total: 500, + active: 500, + unlocking: Default::default(), + } + ); + assert_eq!(Nominators::::get(101).unwrap().targets, vec![11, 21]); + + assert_eq!( + Staking::eras_stakers(active_era(), &11), + Exposure { + total: 1250, + own: 1000, + others: vec![IndividualExposure { who: 101, value: 250 }] + }, + ); + assert_eq!( + Staking::eras_stakers(active_era(), &21), + Exposure { + total: 1250, + own: 1000, + others: vec![IndividualExposure { who: 101, value: 250 }] + }, + ); + + // Current 
active and planned era + assert_eq!(active_era(), 1); + assert_eq!(current_era(), 1); + assert_eq!(Session::current_index(), 3); + assert_eq_uvec!(Session::validators(), vec![11, 21]); + + // initial total stake = 1125 + 1375 + assert_eq!(ErasTotalStake::::get(active_era()), 2500); + + // The number of validators required. + assert_eq!(ValidatorCount::::get(), 2); + + // New era is not being forced + assert_eq!(ForceEra::::get(), Forcing::NotForcing); + + // Events so far + assert_eq!( + staking_events(), + vec![ + Event::SessionRotated { starting_session: 1, active_era: 0, planned_era: 1 }, + Event::PagedElectionProceeded { page: 0, result: Ok(2) }, + Event::SessionRotated { starting_session: 2, active_era: 0, planned_era: 1 }, + Event::EraPaid { era_index: 0, validator_payout: 7500, remainder: 7500 }, + Event::SessionRotated { starting_session: 3, active_era: 1, planned_era: 1 } + ] + ); + }); +} + +#[test] +fn basic_setup_session_rotation() { + ExtBuilder::default().build_and_execute(|| { + // our initial clean state at active era 1. + assert_eq!(active_era(), 1); + assert_eq!(current_era(), 1); + assert_eq!(Session::current_index(), 3); + assert_eq_uvec!(Session::validators(), vec![11, 21]); + + // roll one session, we have planned our era + Session::roll_to_next_session(); + assert_eq!(Session::current_index(), 4); + assert_eq!(current_era(), 2); + assert_eq!(active_era(), 1); + + // roll one session, still in planning, and something is queued in session now. + Session::roll_to_next_session(); + assert_eq!(Session::current_index(), 5); + assert_eq!(active_era(), 1); + assert_eq!(current_era(), 2); + + // roll one session, we activate the era. 
+ Session::roll_to_next_session(); + assert_eq!(Session::current_index(), 6); + assert_eq!(active_era(), 2); + assert_eq!(current_era(), 2); + }); +} + +#[test] +fn basic_setup_sessions_per_era() { + ExtBuilder::default() + .session_per_era(6) + .no_flush_events() + .build_and_execute(|| { + // test state forwards us to the end of session 6 / active era 1 + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::SessionRotated { starting_session: 1, active_era: 0, planned_era: 0 }, + Event::SessionRotated { starting_session: 2, active_era: 0, planned_era: 0 }, + Event::SessionRotated { starting_session: 3, active_era: 0, planned_era: 0 }, + Event::SessionRotated { starting_session: 4, active_era: 0, planned_era: 1 }, + Event::PagedElectionProceeded { page: 0, result: Ok(2) }, + Event::SessionRotated { starting_session: 5, active_era: 0, planned_era: 1 }, + Event::EraPaid { era_index: 0, validator_payout: 15000, remainder: 15000 }, + Event::SessionRotated { starting_session: 6, active_era: 1, planned_era: 1 } + ] + ); + assert_eq!(Session::current_index(), 6); + assert_eq!(active_era(), 1); + + Session::roll_until_active_era(2); + assert_eq!(Session::current_index(), 12); + assert_eq!(active_era(), 2); + + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::SessionRotated { starting_session: 7, active_era: 1, planned_era: 1 }, + Event::SessionRotated { starting_session: 8, active_era: 1, planned_era: 1 }, + Event::SessionRotated { starting_session: 9, active_era: 1, planned_era: 1 }, + Event::SessionRotated { starting_session: 10, active_era: 1, planned_era: 2 }, + Event::PagedElectionProceeded { page: 0, result: Ok(2) }, + Event::SessionRotated { starting_session: 11, active_era: 1, planned_era: 2 }, + Event::EraPaid { era_index: 1, validator_payout: 15000, remainder: 15000 }, + Event::SessionRotated { starting_session: 12, active_era: 2, planned_era: 2 } + ] + ); + }); +} + +mod try_state_assertions { + use super::*; + #[test] + 
#[should_panic] + fn count_check_works() { + ExtBuilder::default().build_and_execute(|| { + // We should never insert into the validators or nominators map directly as this will + // not keep track of the count. This test should panic as we verify the count is + // accurate after every test using the `post_checks` in `mock`. + Validators::::insert(987654321, ValidatorPrefs::default()); + Nominators::::insert( + 987654321, + Nominations { + targets: Default::default(), + submitted_in: Default::default(), + suppressed: false, + }, + ); + }) + } + + #[test] + #[should_panic = "called `Result::unwrap()` on an `Err` value: Other(\"number of entries in payee storage items does not match the number of bonded ledgers\")"] + fn check_payee_invariant1_works() { + // A bonded ledger should always have an assigned `Payee` This test should panic as we + // verify that a bad state will panic due to the `try_state` checks in the `post_checks` + // in `mock`. + ExtBuilder::default().build_and_execute(|| { + let rogue_ledger = StakingLedger::::new(123456, 20); + Ledger::::insert(123456, rogue_ledger); + }) + } + + #[test] + #[should_panic = "called `Result::unwrap()` on an `Err` value: Other(\"number of entries in payee storage items does not match the number of bonded ledgers\")"] + fn check_payee_invariant2_works() { + // The number of entries in both `Payee` and of bonded staking ledgers should match. This + // test should panic as we verify that a bad state will panic due to the `try_state` + // checks in the `post_checks` in `mock`. 
+ ExtBuilder::default().build_and_execute(|| { + Payee::::insert(1111, RewardDestination::Staked); + }) + } +} + +mod validator_count { + use super::*; + + #[test] + fn increase_validator_count_errors() { + ExtBuilder::default().build_and_execute(|| { + MaxValidatorSet::set(50); + MaxWinnersPerPage::set(50); + assert_ok!(Staking::set_validator_count(RuntimeOrigin::root(), 40)); + + // increase works + assert_ok!(Staking::increase_validator_count(RuntimeOrigin::root(), 6)); + assert_eq!(ValidatorCount::::get(), 46); + + // errors + assert_noop!( + Staking::increase_validator_count(RuntimeOrigin::root(), 5), + Error::::TooManyValidators, + ); + }) + } + + #[test] + fn scale_validator_count_errors() { + ExtBuilder::default().build_and_execute(|| { + MaxValidatorSet::set(50); + MaxWinnersPerPage::set(50); + assert_ok!(Staking::set_validator_count(RuntimeOrigin::root(), 20)); + + // scale value works + assert_ok!(Staking::scale_validator_count( + RuntimeOrigin::root(), + Percent::from_percent(200) + )); + assert_eq!(ValidatorCount::::get(), 40); + + // errors + assert_noop!( + Staking::scale_validator_count(RuntimeOrigin::root(), Percent::from_percent(126)), + Error::::TooManyValidators, + ); + }) + } + + #[test] + fn cannot_set_unsupported_validator_count() { + ExtBuilder::default().build_and_execute(|| { + MaxValidatorSet::set(50); + MaxWinnersPerPage::set(50); + + // set validator count works + assert_ok!(Staking::set_validator_count(RuntimeOrigin::root(), 30)); + assert_ok!(Staking::set_validator_count(RuntimeOrigin::root(), 50)); + + // setting validator count above 100 does not work + assert_noop!( + Staking::set_validator_count(RuntimeOrigin::root(), 51), + Error::::TooManyValidators, + ); + }) + } +} + +mod staking_interface { + use frame_support::storage::with_storage_layer; + use sp_staking::StakingInterface; + + use super::*; + + #[test] + fn force_unstake_with_slash_works() { + ExtBuilder::default().build_and_execute(|| { + // without slash + let _ = 
with_storage_layer::<(), _, _>(|| { + // bond an account, can unstake + assert_eq!(Staking::bonded(&11), Some(11)); + assert_ok!(::force_unstake(11)); + Err(DispatchError::from("revert")) + }); + + // bond again and add a slash, still can unstake. + assert_eq!(Staking::bonded(&11), Some(11)); + add_slash(11); + assert_ok!(::force_unstake(11)); + }); + } + + #[test] + fn do_withdraw_unbonded_with_wrong_slash_spans_works_as_expected() { + ExtBuilder::default().build_and_execute(|| { + // add a slash and go forward one block so that it is computed, and slashing spans are + // created. + add_slash_with_percent(11, 100); + Session::roll_next(); + + assert_eq!(Staking::bonded(&11), Some(11)); + + assert_noop!( + Staking::withdraw_unbonded(RuntimeOrigin::signed(11), 0), + Error::::IncorrectSlashingSpans + ); + + let num_slashing_spans = + SlashingSpans::::get(&11).map_or(0, |s| s.iter().count()); + + assert_ok!(Staking::withdraw_unbonded( + RuntimeOrigin::signed(11), + num_slashing_spans as u32 + )); + }); + } + + #[test] + fn do_withdraw_unbonded_can_kill_stash_with_existential_deposit_zero() { + ExtBuilder::default() + .existential_deposit(0) + .nominate(false) + .build_and_execute(|| { + // Initial state of 11 + assert_eq!(Staking::bonded(&11), Some(11)); + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 1000, + unlocking: Default::default(), + } + ); + assert_eq!( + Staking::eras_stakers(active_era(), &11), + Exposure { total: 1000, own: 1000, others: vec![] } + ); + + // Unbond all of the funds in stash. + Staking::chill(RuntimeOrigin::signed(11)).unwrap(); + Staking::unbond(RuntimeOrigin::signed(11), 1000).unwrap(); + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 0, + unlocking: bounded_vec![UnlockChunk { value: 1000, era: 4 }], + }, + ); + + // trigger future era. 
+ Session::roll_until_active_era(4); + + // withdraw unbonded + assert_ok!(Staking::withdraw_unbonded(RuntimeOrigin::signed(11), 0)); + + // empty stash has been reaped + assert!(!>::contains_key(&11)); + assert!(!>::contains_key(&11)); + assert!(!>::contains_key(&11)); + assert!(!>::contains_key(&11)); + // lock is removed. + assert_eq!(asset::staked::(&11), 0); + }); + } + + #[test] + fn status() { + ExtBuilder::default().build_and_execute(|| { + // stash of a validator is identified as a validator + assert_eq!(Staking::status(&11).unwrap(), StakerStatus::Validator); + // .. but not the controller. + assert!(Staking::status(&10).is_err()); + + // stash of nominator is identified as a nominator + assert_eq!(Staking::status(&101).unwrap(), StakerStatus::Nominator(vec![11, 21])); + // .. but not the controller. + assert!(Staking::status(&100).is_err()); + + // stash of chilled is identified as a chilled + assert_eq!(Staking::status(&41).unwrap(), StakerStatus::Idle); + // .. but not the controller. + assert!(Staking::status(&40).is_err()); + + // random other account. + assert!(Staking::status(&42).is_err()); + }) + } +} + +mod staking_unchecked { + use sp_staking::{Stake, StakingInterface, StakingUnchecked}; + + use super::*; + + #[test] + fn virtual_bond_does_not_lock_or_hold() { + ExtBuilder::default().build_and_execute(|| { + assert_eq!(asset::total_balance::(&10), 0); + + // 10 can bond more than its balance amount since we do not require lock for virtual + // bonding. + assert_ok!(::virtual_bond(&10, 100, &15)); + + // nothing is locked on 10. + assert_eq!(asset::staked::(&10), 0); + + // adding more balance does not lock anything as well. + assert_ok!(::bond_extra(&10, 1000)); + + // but ledger is updated correctly. + assert_eq!( + ::stake(&10), + Ok(Stake { total: 1100, active: 1100 }) + ); + + // lets try unbonding some amount. 
+ assert_ok!(::unbond(&10, 200)); + + assert_eq!( + Staking::ledger(10.into()).unwrap(), + StakingLedgerInspect { + stash: 10, + total: 1100, + active: 1100 - 200, + unlocking: bounded_vec![UnlockChunk { value: 200, era: 1 + 3 }], + } + ); + + assert_eq!( + ::stake(&10), + Ok(Stake { total: 1100, active: 900 }) + ); + + // still no locks. + assert_eq!(asset::staked::(&10), 0); + + Session::roll_until_active_era(2); + + // cannot withdraw without waiting for unbonding period. + assert_ok!(::withdraw_unbonded(10, 0)); + assert_eq!( + ::stake(&10), + Ok(Stake { total: 1100, active: 900 }) + ); + + // in era 4, 10 can withdraw unlocking amount. + Session::roll_until_active_era(4); + assert_ok!(::withdraw_unbonded(10, 0)); + assert_eq!( + ::stake(&10), + Ok(Stake { total: 900, active: 900 }) + ); + + // unbond all. + assert_ok!(::unbond(&10, 900)); + assert_eq!( + ::stake(&10), + Ok(Stake { total: 900, active: 0 }) + ); + + Session::roll_until_active_era(7); + assert_ok!(::withdraw_unbonded(10, 0)); + + // ensure withdrawing all amount cleans up storage. + assert_eq!(Staking::ledger(10.into()), Err(Error::::NotStash)); + assert_eq!(VirtualStakers::::contains_key(10), false); + }) + } + + #[test] + fn virtual_staker_cannot_pay_reward_to_self_account() { + ExtBuilder::default().build_and_execute(|| { + // cannot set payee to self + assert_noop!( + ::virtual_bond(&10, 100, &10), + Error::::RewardDestinationRestricted + ); + + // to another account works + assert_ok!(::virtual_bond(&10, 100, &11)); + + // cannot set via set_payee as well. + assert_noop!( + ::set_payee(&10, &10), + Error::::RewardDestinationRestricted + ); + }); + } + + #[test] + fn virtual_staker_cannot_bond_again() { + ExtBuilder::default().build_and_execute(|| { + // 200 virtual bonds + bond_virtual_nominator(200, 201, 500, vec![11, 21]); + + // Tries bonding again + assert_noop!( + ::virtual_bond(&200, 200, &201), + Error::::AlreadyBonded + ); + + // And again with a different reward destination. 
+ assert_noop!( + ::virtual_bond(&200, 200, &202), + Error::::AlreadyBonded + ); + + // Direct bond is not allowed as well. + assert_noop!( + ::bond(&200, 200, &202), + Error::::AlreadyBonded + ); + }); + } + + #[test] + fn normal_staker_cannot_virtual_bond() { + ExtBuilder::default().build_and_execute(|| { + // 101 is a nominator trying to virtual bond + assert_noop!( + ::virtual_bond(&101, 200, &102), + Error::::AlreadyBonded + ); + + // validator 21 tries to virtual bond + assert_noop!( + ::virtual_bond(&21, 200, &22), + Error::::AlreadyBonded + ); + }); + } + + #[test] + fn migrate_virtual_staker() { + ExtBuilder::default().build_and_execute(|| { + // give some balance to 200 + asset::set_stakeable_balance::(&200, 2000); + + // stake + assert_ok!(Staking::bond(RuntimeOrigin::signed(200), 1000, RewardDestination::Staked)); + assert_eq!(asset::staked::(&200), 1000); + + // migrate them to virtual staker + assert_ok!(::migrate_to_virtual_staker(&200)); + // payee needs to be updated to a non-stash account. + assert_ok!(::set_payee(&200, &201)); + + // ensure the balance is not locked anymore + assert_eq!(asset::staked::(&200), 0); + + // and they are marked as virtual stakers + assert_eq!(Pallet::::is_virtual_staker(&200), true); + }); + } + + #[test] + fn virtual_nominators_are_lazily_slashed() { + ExtBuilder::default().build_and_execute(|| { + let slash_percent = Perbill::from_percent(5); + let initial_exposure = Staking::eras_stakers(active_era(), &11); + // 101 is a nominator for 11 + assert_eq!(initial_exposure.others.first().unwrap().who, 101); + // make 101 a virtual nominator + assert_ok!(::migrate_to_virtual_staker(&101)); + // set payee different to self. 
+ assert_ok!(::set_payee(&101, &102)); + + // cache values + let nominator_stake = Staking::ledger(101.into()).unwrap().active; + let nominator_balance = asset::stakeable_balance::(&101); + let validator_stake = Staking::ledger(11.into()).unwrap().active; + let validator_balance = asset::stakeable_balance::(&11); + let exposed_stake = initial_exposure.total; + let exposed_validator = initial_exposure.own; + let exposed_nominator = initial_exposure.others.first().unwrap().value; + + // 11 gets slashed + add_slash_with_percent(11, 5); + // so that slashes are applied + Session::roll_next(); + + let slash_amount = slash_percent * exposed_stake; + let validator_share = + Perbill::from_rational(exposed_validator, exposed_stake) * slash_amount; + let nominator_share = + Perbill::from_rational(exposed_nominator, exposed_stake) * slash_amount; + + // both slash amounts need to be positive for the test to make sense. + assert!(validator_share > 0); + assert!(nominator_share > 0); + + // both stakes must have been decreased pro-rata. + assert_eq!( + Staking::ledger(101.into()).unwrap().active, + nominator_stake - nominator_share + ); + assert_eq!( + Staking::ledger(11.into()).unwrap().active, + validator_stake - validator_share + ); + + // validator balance is slashed as usual + assert_eq!(asset::stakeable_balance::(&11), validator_balance - validator_share); + + // but virtual nominator's balance is not slashed. + assert_eq!(asset::stakeable_balance::(&101), nominator_balance); + // but slash is broadcasted to slash observers. + assert_eq!(SlashObserver::get().get(&101).unwrap(), &nominator_share); + }) + } + + #[test] + fn virtual_stakers_cannot_be_reaped() { + ExtBuilder::default() + .set_status(101, StakerStatus::Nominator(vec![11])) + .build_and_execute(|| { + // slash all stake. 
+ let initial_exposure = Staking::eras_stakers(active_era(), &11); + // 101 is a nominator for 11 + assert_eq!(initial_exposure.others.first().unwrap().who, 101); + // make 101 a virtual nominator + assert_ok!(::migrate_to_virtual_staker(&101)); + // set payee different to self. + assert_ok!(::set_payee(&101, &102)); + + // cache values + let validator_balance = asset::stakeable_balance::(&11); + let validator_stake = Staking::ledger(11.into()).unwrap().total; + let nominator_balance = asset::stakeable_balance::(&101); + let nominator_stake = Staking::ledger(101.into()).unwrap().total; + + // 11 gets slashed + add_slash_with_percent(11, 100); + // so that slashes are applied + Session::roll_next(); + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::OffenceReported { + offence_era: 1, + validator: 11, + fraction: Perbill::from_percent(100), + }, + Event::SlashComputed { + offence_era: 1, + slash_era: 1, + offender: 11, + page: 0 + }, + Event::Slashed { staker: 11, amount: 1000 }, + Event::Slashed { staker: 101, amount: 500 } + ] + ); + + // both stakes must have been decreased to 0. + assert_eq!(Staking::ledger(11.into()).unwrap().active, 0); + assert_eq!(Staking::ledger(101.into()).unwrap().active, 0); + + // all validator stake is slashed + assert_eq_error_rate!( + validator_balance - validator_stake, + asset::stakeable_balance::(&11), + 1 + ); + + // Virtual nominator's balance is not slashed. + assert_eq!(asset::stakeable_balance::(&101), nominator_balance); + // Slash is broadcasted to slash observers. + assert_eq!(SlashObserver::get().get(&101).unwrap(), &nominator_stake); + + // validator can be reaped. + assert_ok!(Staking::reap_stash(RuntimeOrigin::signed(10), 11, u32::MAX)); + // nominator is a virtual staker and cannot be reaped. 
+ assert_noop!( + Staking::reap_stash(RuntimeOrigin::signed(10), 101, u32::MAX), + Error::::VirtualStakerNotAllowed + ); + }) + } + + #[test] + fn restore_ledger_not_allowed_for_virtual_stakers() { + ExtBuilder::default().has_stakers(true).build_and_execute(|| { + setup_double_bonded_ledgers(); + assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Ok); + set_controller_no_checks(&444); + // 333 is corrupted + assert_eq!(Staking::inspect_bond_state(&333).unwrap(), LedgerIntegrityState::Corrupted); + // migrate to virtual staker. + assert_ok!(::migrate_to_virtual_staker(&333)); + + // recover the ledger won't work for virtual staker + assert_noop!( + Staking::restore_ledger(RuntimeOrigin::root(), 333, None, None, None), + Error::::VirtualStakerNotAllowed + ); + + // migrate 333 back to normal staker + >::remove(333); + + // try restore again + assert_ok!(Staking::restore_ledger(RuntimeOrigin::root(), 333, None, None, None)); + }) + } +} + +mod hold_migration { + use super::*; + + #[test] + fn ledger_update_creates_hold() { + ExtBuilder::default().has_stakers(true).build_and_execute(|| { + // GIVEN alice who is a nominator with old currency + let alice = 300; + bond_nominator(alice, 1000, vec![11]); + assert_eq!(asset::staked::(&alice), 1000); + assert_eq!(Balances::balance_locked(STAKING_ID, &alice), 0); + // migrate alice currency to legacy locks + testing_utils::migrate_to_old_currency::(alice); + // no more holds + assert_eq!(asset::staked::(&alice), 0); + assert_eq!(Balances::balance_locked(STAKING_ID, &alice), 1000); + assert_eq!( + ::stake(&alice), + Ok(Stake { total: 1000, active: 1000 }) + ); + + // any ledger mutation should create a hold + hypothetically!({ + // give some extra balance to alice. + let _ = asset::mint_into_existing::(&alice, 100); + + // WHEN new fund is bonded to ledger. 
+ assert_ok!(Staking::bond_extra(RuntimeOrigin::signed(alice), 100)); + + // THEN new hold is created + assert_eq!(asset::staked::(&alice), 1000 + 100); + assert_eq!( + ::stake(&alice), + Ok(Stake { total: 1100, active: 1100 }) + ); + + // old locked balance is untouched + assert_eq!(Balances::balance_locked(STAKING_ID, &alice), 1000); + }); + + hypothetically!({ + // WHEN new fund is unbonded from ledger. + assert_ok!(Staking::unbond(RuntimeOrigin::signed(alice), 100)); + + // THEN hold is updated. + assert_eq!(asset::staked::(&alice), 1000); + assert_eq!( + ::stake(&alice), + Ok(Stake { total: 1000, active: 900 }) + ); + + // old locked balance is untouched + assert_eq!(Balances::balance_locked(STAKING_ID, &alice), 1000); + }); + + // WHEN alice currency is migrated. + assert_ok!(Staking::migrate_currency(RuntimeOrigin::signed(1), alice)); + + // THEN hold is updated. + assert_eq!(asset::staked::(&alice), 1000); + assert_eq!( + ::stake(&alice), + Ok(Stake { total: 1000, active: 1000 }) + ); + + // ensure cannot migrate again. + assert_noop!( + Staking::migrate_currency(RuntimeOrigin::signed(1), alice), + Error::::AlreadyMigrated + ); + + // locked balance is removed + assert_eq!(Balances::balance_locked(STAKING_ID, &alice), 0); + }); + } + + #[test] + fn migrate_removes_old_lock() { + ExtBuilder::default().has_stakers(true).build_and_execute(|| { + // GIVEN alice who is a nominator with old currency + let alice = 300; + bond_nominator(alice, 1000, vec![11]); + testing_utils::migrate_to_old_currency::(alice); + assert_eq!(asset::staked::(&alice), 0); + assert_eq!(Balances::balance_locked(STAKING_ID, &alice), 1000); + let pre_migrate_consumer = System::consumers(&alice); + let _ = staking_events_since_last_call(); + + // WHEN alice currency is migrated. + assert_ok!(Staking::migrate_currency(RuntimeOrigin::signed(1), alice)); + + // THEN + // the extra consumer from old code is removed. 
+ assert_eq!(System::consumers(&alice), pre_migrate_consumer - 1); + // ensure no lock + assert_eq!(Balances::balance_locked(STAKING_ID, &alice), 0); + // ensure stake and hold are same. + assert_eq!( + ::stake(&alice), + Ok(Stake { total: 1000, active: 1000 }) + ); + assert_eq!(asset::staked::(&alice), 1000); + // ensure events are emitted. + assert_eq!( + staking_events_since_last_call(), + vec![Event::CurrencyMigrated { stash: alice, force_withdraw: 0 }] + ); + + // ensure cannot migrate again. + assert_noop!( + Staking::migrate_currency(RuntimeOrigin::signed(1), alice), + Error::::AlreadyMigrated + ); + }); + } + #[test] + fn cannot_hold_all_stake() { + // When there is not enough funds to hold all stake, part of the stake if force withdrawn. + // At end of the migration, the stake and hold should be same. + ExtBuilder::default().has_stakers(true).build_and_execute(|| { + // GIVEN alice who is a nominator with old currency. + let alice = 300; + let stake = 1000; + bond_nominator(alice, stake, vec![11]); + testing_utils::migrate_to_old_currency::(alice); + assert_eq!(asset::staked::(&alice), 0); + assert_eq!(Balances::balance_locked(STAKING_ID, &alice), stake); + // ledger has 1000 staked. + assert_eq!( + ::stake(&alice), + Ok(Stake { total: stake, active: stake }) + ); + + // Get rid of the extra ED to emulate all their balance including ED is staked. + assert_ok!(Balances::transfer_allow_death( + RuntimeOrigin::signed(alice), + 10, + ExistentialDeposit::get() + )); + + let expected_force_withdraw = ExistentialDeposit::get(); + + // ledger mutation would fail in this case before migration because of failing hold. + assert_noop!( + Staking::unbond(RuntimeOrigin::signed(alice), 100), + Error::::NotEnoughFunds + ); + + // clear events + let _ = staking_events_since_last_call(); + + // WHEN alice currency is migrated. 
+ assert_ok!(Staking::migrate_currency(RuntimeOrigin::signed(1), alice)); + + // THEN + let expected_hold = stake - expected_force_withdraw; + // ensure no lock + assert_eq!(Balances::balance_locked(STAKING_ID, &alice), 0); + // ensure stake and hold are same. + assert_eq!( + ::stake(&alice), + Ok(Stake { total: expected_hold, active: expected_hold }) + ); + assert_eq!(asset::staked::(&alice), expected_hold); + // ensure events are emitted. + assert_eq!( + staking_events_since_last_call(), + vec![Event::CurrencyMigrated { + stash: alice, + force_withdraw: expected_force_withdraw + }] + ); + + // ensure cannot migrate again. + assert_noop!( + Staking::migrate_currency(RuntimeOrigin::signed(1), alice), + Error::::AlreadyMigrated + ); + + // unbond works after migration. + assert_ok!(Staking::unbond(RuntimeOrigin::signed(alice), 100)); + }); + } + + #[test] + fn virtual_staker_consumer_provider_dec() { + // Ensure virtual stakers consumer and provider count is decremented. + ExtBuilder::default().has_stakers(true).build_and_execute(|| { + // 200 virtual bonds + bond_virtual_nominator(200, 201, 500, vec![11, 21]); + + // previously the virtual nominator had a provider inc by the delegation system as + // well as a consumer by this pallet. + System::inc_providers(&200); + System::inc_consumers(&200).expect("has provider, can consume"); + + hypothetically!({ + // migrate 200 + assert_ok!(Staking::migrate_currency(RuntimeOrigin::signed(1), 200)); + + // ensure account does not exist in system anymore. + assert_eq!(System::consumers(&200), 0); + assert_eq!(System::providers(&200), 0); + assert!(!System::account_exists(&200)); + + // ensure cannot migrate again. + assert_noop!( + Staking::migrate_currency(RuntimeOrigin::signed(1), 200), + Error::::AlreadyMigrated + ); + }); + + hypothetically!({ + // 200 has an erroneously extra provider + System::inc_providers(&200); + + // causes migration to fail. 
+ assert_noop!( + Staking::migrate_currency(RuntimeOrigin::signed(1), 200), + Error::::BadState + ); + }); + + // 200 is funded for more than ED by a random account. + assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(999), 200, 10)); + + // it has an extra provider now. + assert_eq!(System::providers(&200), 2); + + // migrate 200 + assert_ok!(Staking::migrate_currency(RuntimeOrigin::signed(1), 200)); + + // 1 provider is left, consumers is 0. + assert_eq!(System::providers(&200), 1); + assert_eq!(System::consumers(&200), 0); + + // ensure cannot migrate again. + assert_noop!( + Staking::migrate_currency(RuntimeOrigin::signed(1), 200), + Error::::AlreadyMigrated + ); + }); + } +} + +/* +#[test] +fn reward_validator_slashing_validator_does_not_overflow() { + ExtBuilder::default().nominate(false).build_and_execute(|| { + let stake = u64::MAX as Balance * 2; + let reward_slash = u64::MAX as Balance * 2; + + // Assert multiplication overflows in balance arithmetic. + assert!(stake.checked_mul(reward_slash).is_none()); + + // Set staker + let _ = asset::set_stakeable_balance::(&11, stake); + + let reward = EraRewardPoints:: { + total: 1, + individual: vec![(11, 1)].into_iter().collect(), + }; + + // Check reward + ErasRewardPoints::::insert(0, reward); + + // force exposure metadata to account for the overflowing `stake`. + ErasStakersOverview::::insert( + current_era(), + 11, + PagedExposureMetadata { total: stake, own: stake, nominator_count: 0, page_count: 0 }, + ); + + // we want to slash only self-stake, confirm that no others exposed. + let full_exposure_after = Eras::::get_full_exposure(current_era(), &11); + assert_eq!(full_exposure_after.total, stake); + assert_eq!(full_exposure_after.others, vec![]); + + ErasValidatorReward::::insert(0, stake); + assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 0, 0)); + assert_eq!(asset::stakeable_balance::(&11), stake * 2); + + // ensure ledger has `stake` and no more. 
+ Ledger::::insert( + 11, + StakingLedgerInspect { + stash: 11, + total: stake, + active: stake, + unlocking: Default::default(), + }, + ); + // Set staker (unsafe, can reduce balance below actual stake) + let _ = asset::set_stakeable_balance::(&11, stake); + let _ = asset::set_stakeable_balance::(&2, stake); + + // only slashes out of bonded stake are applied. without this line, it is 0. + Staking::bond(RuntimeOrigin::signed(2), stake - 1, RewardDestination::Staked).unwrap(); + + // Override metadata and exposures of 11 so that it exposes minmal self stake and `stake` - + // 1 from nominator 2. + ErasStakersOverview::::insert( + current_era(), + 11, + PagedExposureMetadata { total: stake, own: 1, nominator_count: 1, page_count: 1 }, + ); + + ErasStakersPaged::::insert( + (current_era(), &11, 0), + ExposurePage { + page_total: stake - 1, + others: vec![IndividualExposure { who: 2, value: stake - 1 }], + }, + ); + + // Check slashing + on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(100)], true); + + assert_eq!(asset::stakeable_balance::(&11), stake - 1); + assert_eq!(asset::stakeable_balance::(&2), 1); + }) +} + +#[test] +fn validator_is_not_disabled_for_an_offence_in_previous_era() { + ExtBuilder::default() + .validator_count(4) + .set_status(41, StakerStatus::Validator) + .build_and_execute(|| { + mock::start_active_era(1); + + assert!(>::contains_key(11)); + assert!(Session::validators().contains(&11)); + + on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(0)], true); + + assert_eq!(ForceEra::::get(), Forcing::NotForcing); + assert!(is_disabled(11)); + + mock::start_active_era(2); + + // the validator is not disabled in the new era + Staking::validate(RuntimeOrigin::signed(11), Default::default()).unwrap(); + assert_eq!(ForceEra::::get(), Forcing::NotForcing); + assert!(>::contains_key(11)); + assert!(Session::validators().contains(&11)); + + mock::start_active_era(3); + + // an offence committed in era 1 is reported in era 3 + 
on_offence_in_era(&[offence_from(11, None)], &[Perbill::from_percent(0)], 1, true); + + // the validator doesn't get disabled for an old offence + assert!(Validators::::iter().any(|(stash, _)| stash == 11)); + assert!(!is_disabled(11)); + + // and we are not forcing a new era + assert_eq!(ForceEra::::get(), Forcing::NotForcing); + + on_offence_in_era( + &[offence_from(11, None)], + // NOTE: A 100% slash here would clean up the account, causing de-registration. + &[Perbill::from_percent(95)], + 1, + true, + ); + + // the validator doesn't get disabled again + assert!(Validators::::iter().any(|(stash, _)| stash == 11)); + assert!(!is_disabled(11)); + // and we are still not forcing a new era + assert_eq!(ForceEra::::get(), Forcing::NotForcing); + }); +} + +#[test] +fn slash_kicks_validators_not_nominators_and_disables_nominator_for_kicked_validator() { + ExtBuilder::default() + .validator_count(7) + .set_status(41, StakerStatus::Validator) + .set_status(51, StakerStatus::Validator) + .set_status(201, StakerStatus::Validator) + .set_status(202, StakerStatus::Validator) + .build_and_execute(|| { + mock::start_active_era(1); + assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51, 201, 202]); + + // pre-slash balance + assert_eq!(asset::stakeable_balance::(&11), 1000); + assert_eq!(asset::stakeable_balance::(&101), 2000); + + // 100 has approval for 11 as of now + assert!(Nominators::::get(101).unwrap().targets.contains(&11)); + + // 11 and 21 both have the support of 100 + let exposure_11 = Staking::eras_stakers(active_era(), &11); + let exposure_21 = Staking::eras_stakers(active_era(), &21); + + assert_eq!(exposure_11.total, 1000 + 125); + assert_eq!(exposure_21.total, 1000 + 375); + + on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(10)], true); + + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::PagedElectionProceeded { page: 0, result: Ok(7) }, + Event::StakersElected, + Event::EraPaid { era_index: 0, validator_payout: 
11075, remainder: 33225 }, + Event::OffenceReported { + validator: 11, + fraction: Perbill::from_percent(10), + offence_era: 1 + }, + Event::SlashComputed { offence_era: 1, slash_era: 1, offender: 11, page: 0 }, + Event::Slashed { staker: 11, amount: 100 }, + Event::Slashed { staker: 101, amount: 12 }, + ] + ); + + assert!(matches!( + session_events().as_slice(), + &[.., SessionEvent::ValidatorDisabled { validator: 11 }] + )); + + // post-slash balance + let nominator_slash_amount_11 = 125 / 10; + assert_eq!(asset::stakeable_balance::(&11), 900); + assert_eq!(asset::stakeable_balance::(&101), 2000 - nominator_slash_amount_11); + + // check that validator was disabled. + assert!(is_disabled(11)); + + // actually re-bond the slashed validator + assert_ok!(Staking::validate(RuntimeOrigin::signed(11), Default::default())); + + mock::start_active_era(2); + let exposure_11 = Staking::eras_stakers(active_era(), &11); + let exposure_21 = Staking::eras_stakers(active_era(), &21); + + // 11's own expo is reduced. sum of support from 11 is less (448), which is 500 + // 900 + 146 + assert!(matches!(exposure_11, Exposure { own: 900, total: 1046, .. })); + // 1000 + 342 + assert!(matches!(exposure_21, Exposure { own: 1000, total: 1342, .. })); + assert_eq!(500 - 146 - 342, nominator_slash_amount_11); + }); +} +*/ diff --git a/substrate/frame/staking-async/src/tests/payout_stakers.rs b/substrate/frame/staking-async/src/tests/payout_stakers.rs new file mode 100644 index 0000000000000..79277ac856c68 --- /dev/null +++ b/substrate/frame/staking-async/src/tests/payout_stakers.rs @@ -0,0 +1,1693 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::*; +use crate::session_rotation::Eras; +use frame_support::dispatch::{extract_actual_weight, GetDispatchInfo, WithPostDispatchInfo}; +use sp_runtime::traits::Dispatchable; + +#[test] +fn rewards_with_nominator_should_work() { + ExtBuilder::default().nominate(true).session_per_era(3).build_and_execute(|| { + let init_balance_11 = asset::total_balance::(&11); + let init_balance_21 = asset::total_balance::(&21); + let init_balance_101 = asset::total_balance::(&101); + + // Set payees + Payee::::insert(11, RewardDestination::Account(11)); + Payee::::insert(21, RewardDestination::Account(21)); + Payee::::insert(101, RewardDestination::Account(101)); + + Eras::::reward_active_era(vec![(11, 50)]); + Eras::::reward_active_era(vec![(11, 50)]); + // This is the second validator of the current elected set. + Eras::::reward_active_era(vec![(21, 50)]); + + // Compute total payout now for whole duration of the session. 
+ let validator_payout_0 = validator_payout_for(time_per_era()); + let maximum_payout = total_payout_for(time_per_era()); + + assert_eq_uvec!(Session::validators(), vec![11, 21]); + + assert_eq!(asset::total_balance::(&11), init_balance_11); + assert_eq!(asset::total_balance::(&21), init_balance_21); + assert_eq!(asset::total_balance::(&101), init_balance_101); + assert_eq!( + ErasRewardPoints::::get(active_era()), + EraRewardPoints { + total: 50 * 3, + individual: vec![(11, 100), (21, 50)].into_iter().collect(), + } + ); + let part_for_11 = Perbill::from_rational::(1000, 1250); + let part_for_21 = Perbill::from_rational::(1000, 1250); + let part_for_101_from_11 = Perbill::from_rational::(250, 1250); + let part_for_101_from_21 = Perbill::from_rational::(250, 1250); + + Session::roll_until_active_era(2); + + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::SessionRotated { starting_session: 4, active_era: 1, planned_era: 2 }, + Event::PagedElectionProceeded { page: 0, result: Ok(2) }, + Event::SessionRotated { starting_session: 5, active_era: 1, planned_era: 2 }, + Event::EraPaid { + era_index: 1, + validator_payout: validator_payout_0, + remainder: maximum_payout - validator_payout_0 + }, + Event::SessionRotated { starting_session: 6, active_era: 2, planned_era: 2 } + ] + ); + assert_eq!(mock::RewardRemainderUnbalanced::get(), maximum_payout - validator_payout_0); + + // make note of total issuance before rewards. 
+ let pre_issuance = asset::total_issuance::(); + + mock::make_all_reward_payment(1); + assert_eq!( + mock::staking_events_since_last_call(), + vec![ + Event::PayoutStarted { era_index: 1, validator_stash: 11, page: 0, next: None }, + Event::Rewarded { stash: 11, dest: RewardDestination::Account(11), amount: 4000 }, + Event::Rewarded { stash: 101, dest: RewardDestination::Account(101), amount: 1000 }, + Event::PayoutStarted { era_index: 1, validator_stash: 21, page: 0, next: None }, + Event::Rewarded { stash: 21, dest: RewardDestination::Account(21), amount: 2000 }, + Event::Rewarded { stash: 101, dest: RewardDestination::Account(101), amount: 500 } + ] + ); + + // total issuance should have increased + let post_issuance = asset::total_issuance::(); + assert_eq!(post_issuance, pre_issuance + validator_payout_0); + + assert_eq_error_rate!( + asset::total_balance::(&11), + init_balance_11 + part_for_11 * validator_payout_0 * 2 / 3, + 2, + ); + assert_eq_error_rate!( + asset::total_balance::(&21), + init_balance_21 + part_for_21 * validator_payout_0 * 1 / 3, + 2, + ); + assert_eq_error_rate!( + asset::total_balance::(&101), + init_balance_101 + + part_for_101_from_11 * validator_payout_0 * 2 / 3 + + part_for_101_from_21 * validator_payout_0 * 1 / 3, + 2 + ); + + assert_eq_uvec!(Session::validators(), vec![11, 21]); + Eras::::reward_active_era(vec![(11, 1)]); + + // Compute total payout now for whole duration as other parameter won't change + let total_payout_1 = validator_payout_for(time_per_era()); + + Session::roll_until_active_era(3); + + assert_eq!( + mock::RewardRemainderUnbalanced::get(), + maximum_payout * 2 - validator_payout_0 - total_payout_1, + ); + assert_eq!( + mock::staking_events_since_last_call(), + vec![ + Event::SessionRotated { starting_session: 7, active_era: 2, planned_era: 3 }, + Event::PagedElectionProceeded { page: 0, result: Ok(2) }, + Event::SessionRotated { starting_session: 8, active_era: 2, planned_era: 3 }, + Event::EraPaid { era_index: 
2, validator_payout: 7500, remainder: 7500 }, + Event::SessionRotated { starting_session: 9, active_era: 3, planned_era: 3 } + ] + ); + + mock::make_all_reward_payment(2); + assert_eq!(asset::total_issuance::(), post_issuance + total_payout_1); + + assert_eq_error_rate!( + asset::total_balance::(&11), + init_balance_11 + part_for_11 * (validator_payout_0 * 2 / 3 + total_payout_1), + 2, + ); + assert_eq_error_rate!( + asset::total_balance::(&21), + init_balance_21 + part_for_21 * validator_payout_0 * 1 / 3, + 2, + ); + assert_eq_error_rate!( + asset::total_balance::(&101), + init_balance_101 + + part_for_101_from_11 * (validator_payout_0 * 2 / 3 + total_payout_1) + + part_for_101_from_21 * validator_payout_0 * 1 / 3, + 2 + ); + }); +} + +#[test] +fn rewards_no_nominator_should_work() { + ExtBuilder::default().nominate(false).build_and_execute(|| { + assert_eq_uvec!(Session::validators(), vec![11, 21]); + + // with no backers + assert_eq_uvec!( + era_exposures(1), + vec![ + (11, Exposure:: { total: 1000, own: 1000, others: vec![] }), + (21, Exposure:: { total: 1000, own: 1000, others: vec![] }) + ] + ); + + // give them some points + reward_all_elected(); + + // go to next active era + Session::roll_until_active_era(2); + let _ = staking_events_since_last_call(); + + // payout era 1 + make_all_reward_payment(1); + + // payout works + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::PayoutStarted { era_index: 1, validator_stash: 11, page: 0, next: None }, + Event::Rewarded { stash: 11, dest: RewardDestination::Staked, amount: 3750 }, + Event::PayoutStarted { era_index: 1, validator_stash: 21, page: 0, next: None }, + Event::Rewarded { stash: 21, dest: RewardDestination::Staked, amount: 3750 } + ] + ); + }); +} + +#[test] +fn nominating_and_rewards_should_work() { + ExtBuilder::default() + .nominate(false) + .set_status(41, StakerStatus::Validator) + .build_and_execute(|| { + // initial validators, note that 41 has more stake than 11 + 
assert_eq_uvec!(Session::validators(), vec![41, 21]); + + // bond two monitors, both favouring 11 + bond_nominator(1, 5000, vec![11, 41]); + bond_virtual_nominator(3, 333, 5000, vec![11]); + + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::Bonded { stash: 1, amount: 5000 }, + Event::Bonded { stash: 3, amount: 5000 }, + ] + ); + + // reward our two winning validators + Eras::::reward_active_era(vec![(41, 1)]); + Eras::::reward_active_era(vec![(21, 1)]); + + Session::roll_until_active_era(2); + + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::SessionRotated { starting_session: 4, active_era: 1, planned_era: 2 }, + Event::PagedElectionProceeded { page: 0, result: Ok(2) }, + Event::SessionRotated { starting_session: 5, active_era: 1, planned_era: 2 }, + Event::EraPaid { era_index: 1, validator_payout: 7500, remainder: 7500 }, + Event::SessionRotated { starting_session: 6, active_era: 2, planned_era: 2 } + ] + ); + + // 11 now has more votes + assert_eq_uvec!(Session::validators(), vec![11, 41]); + assert_eq!(ErasStakersPaged::::iter_prefix_values((active_era(),)).count(), 2); + assert_eq!( + Staking::eras_stakers(active_era(), &11), + Exposure { + total: 7500, + own: 1000, + others: vec![ + IndividualExposure { who: 1, value: 1500 }, + IndividualExposure { who: 3, value: 5000 } + ] + } + ); + assert_eq!( + Staking::eras_stakers(active_era(), &41), + Exposure { + total: 7500, + own: 4000, + others: vec![IndividualExposure { who: 1, value: 3500 }] + } + ); + + // payout era 1, in which 21 and 41 were validators with no nominators. 
+ mock::make_all_reward_payment(1); + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::PayoutStarted { era_index: 1, validator_stash: 21, page: 0, next: None }, + Event::Rewarded { stash: 21, dest: RewardDestination::Staked, amount: 3750 }, + Event::PayoutStarted { era_index: 1, validator_stash: 41, page: 0, next: None }, + Event::Rewarded { stash: 41, dest: RewardDestination::Staked, amount: 3750 } + ] + ); + + reward_all_elected(); + Session::roll_until_active_era(3); + // ignore session rotation events, we've seen them before. + let _ = staking_events_since_last_call(); + + // for era 2 we had a nominator too, who is rewarded. + mock::make_all_reward_payment(2); + + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::PayoutStarted { era_index: 2, validator_stash: 11, page: 0, next: None }, + Event::Rewarded { stash: 11, dest: RewardDestination::Staked, amount: 500 }, + Event::Rewarded { stash: 1, dest: RewardDestination::Stash, amount: 750 }, + Event::Rewarded { + stash: 3, + dest: RewardDestination::Account(333), + amount: 2500 + }, + Event::PayoutStarted { era_index: 2, validator_stash: 41, page: 0, next: None }, + Event::Rewarded { stash: 41, dest: RewardDestination::Staked, amount: 2000 }, + Event::Rewarded { stash: 1, dest: RewardDestination::Stash, amount: 1750 } + ] + ); + }); +} + +#[test] +fn reward_destination_staked() { + ExtBuilder::default().nominate(false).build_and_execute(|| { + // initial conditions + assert!(Session::validators().contains(&11)); + assert_eq!(Staking::payee(11.into()), Some(RewardDestination::Staked)); + assert_eq!(asset::total_balance::(&11), 1001); + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 1000, + unlocking: Default::default(), + } + ); + + // reward era 1 and payout at era 2 + Eras::::reward_active_era(vec![(11, 1)]); + Session::roll_until_active_era(2); + let _ = staking_events_since_last_call(); + + 
mock::make_all_reward_payment(1); + assert_eq!(ErasClaimedRewards::::get(1, &11), vec![0]); + + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::PayoutStarted { era_index: 1, validator_stash: 11, page: 0, next: None }, + Event::Rewarded { stash: 11, dest: RewardDestination::Staked, amount: 7500 } + ] + ); + + // ledger must have been increased + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 8500, + active: 8500, + unlocking: Default::default(), + } + ); + // balance also updated + assert_eq!(asset::total_balance::(&11), 1001 + 7500); + }); +} + +#[test] +fn reward_to_stake_works() { + ExtBuilder::default() + .nominate(false) + .set_status(31, StakerStatus::Idle) + .set_status(41, StakerStatus::Idle) + .set_stake(21, 2000) + .try_state(false) + .build_and_execute(|| { + assert_eq!(ValidatorCount::::get(), 2); + // Confirm account 10 and 20 are validators + assert!(>::contains_key(&11) && >::contains_key(&21)); + + assert_eq!(Staking::eras_stakers(active_era(), &11).total, 1000); + assert_eq!(Staking::eras_stakers(active_era(), &21).total, 2000); + + // Give the man some money. 
+ let _ = asset::set_stakeable_balance::(&10, 1000); + let _ = asset::set_stakeable_balance::(&20, 1000); + + // Bypass logic and change current exposure + Eras::::upsert_exposure(0, &21, Exposure { total: 69, own: 69, others: vec![] }); + >::insert( + &20, + StakingLedgerInspect { + stash: 21, + total: 69, + active: 69, + unlocking: Default::default(), + }, + ); + + // Compute total payout now for whole duration as other parameter won't change + let validator_payout_0 = validator_payout_for(time_per_era()); + Pallet::::reward_by_ids(vec![(11, 1)]); + Pallet::::reward_by_ids(vec![(21, 1)]); + + // New era --> rewards are paid --> stakes are changed + Session::roll_until_active_era(2); + make_all_reward_payment(1); + + assert_eq!(Staking::eras_stakers(active_era(), &11).total, 1000); + assert_eq!(Staking::eras_stakers(active_era(), &21).total, 2000); + + let _11_balance = asset::stakeable_balance::(&11); + assert_eq!(_11_balance, 1000 + validator_payout_0 / 2); + + // Trigger another new era as the info are frozen before the era start. 
+ Session::roll_until_active_era(3); + + // -- new infos + assert_eq!( + Staking::eras_stakers(active_era(), &11).total, + 1000 + validator_payout_0 / 2 + ); + assert_eq!( + Staking::eras_stakers(active_era(), &21).total, + 2000 + validator_payout_0 / 2 + ); + }); +} + +#[test] +fn reward_destination_stash() { + ExtBuilder::default().nominate(false).build_and_execute(|| { + // initial conditions + assert!(Session::validators().contains(&11)); + assert_ok!(Staking::set_payee(RuntimeOrigin::signed(11), RewardDestination::Stash)); + assert_eq!(asset::total_balance::(&11), 1001); + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 1000, + unlocking: Default::default(), + } + ); + + // reward era 1 and payout at era 2 + Eras::::reward_active_era(vec![(11, 1)]); + Session::roll_until_active_era(2); + let _ = staking_events_since_last_call(); + + mock::make_all_reward_payment(1); + assert_eq!(ErasClaimedRewards::::get(1, &11), vec![0]); + + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::PayoutStarted { era_index: 1, validator_stash: 11, page: 0, next: None }, + Event::Rewarded { stash: 11, dest: RewardDestination::Stash, amount: 7500 } + ] + ); + + // ledger same, balance increased + assert_eq!(asset::total_balance::(&11), 1001 + 7500); + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 1000, + unlocking: Default::default(), + } + ); + }); +} + +#[test] +fn reward_destination_account() { + ExtBuilder::default().nominate(false).build_and_execute(|| { + // initial conditions + assert!(Session::validators().contains(&11)); + assert_ok!(Staking::set_payee(RuntimeOrigin::signed(11), RewardDestination::Account(7))); + + assert_eq!(asset::total_balance::(&11), 1001); + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 1000, + unlocking: Default::default(), + } + ); + + // 
reward era 1 and payout at era 2 + Eras::::reward_active_era(vec![(11, 1)]); + Session::roll_until_active_era(2); + let _ = staking_events_since_last_call(); + + mock::make_all_reward_payment(1); + assert_eq!(ErasClaimedRewards::::get(1, &11), vec![0]); + + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::PayoutStarted { era_index: 1, validator_stash: 11, page: 0, next: None }, + Event::Rewarded { stash: 11, dest: RewardDestination::Account(7), amount: 7500 } + ] + ); + + // balance and ledger the same, 7 is unded + assert_eq!(asset::total_balance::(&11), 1001); + assert_eq!( + Staking::ledger(11.into()).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 1000, + unlocking: Default::default(), + } + ); + assert_eq!(asset::total_balance::(&7), 7500); + }); +} + +#[test] +fn validator_prefs_no_commission() { + ExtBuilder::default().build_and_execute(|| { + Eras::::reward_active_era(vec![(11, 1)]); + + Session::roll_until_active_era(2); + let _ = staking_events_since_last_call(); + + mock::make_all_reward_payment(1); + + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::PayoutStarted { era_index: 1, validator_stash: 11, page: 0, next: None }, + Event::Rewarded { stash: 11, dest: RewardDestination::Staked, amount: 6000 }, + Event::Rewarded { stash: 101, dest: RewardDestination::Staked, amount: 1500 } + ] + ); + }); +} + +#[test] +fn validator_prefs_100_commission() { + ExtBuilder::default().build_and_execute(|| { + let commission = Perbill::from_percent(100); + Eras::::reward_active_era(vec![(11, 1)]); + + Eras::::set_validator_prefs(1, &11, ValidatorPrefs { commission, ..Default::default() }); + Session::roll_until_active_era(2); + let _ = staking_events_since_last_call(); + + mock::make_all_reward_payment(1); + + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::PayoutStarted { era_index: 1, validator_stash: 11, page: 0, next: None }, + Event::Rewarded { stash: 11, dest: RewardDestination::Staked, 
amount: 7500 } + ] + ); + }); +} + +#[test] +fn validator_payment_some_commission_prefs_work() { + ExtBuilder::default().build_and_execute(|| { + let commission = Perbill::from_percent(40); + Eras::::reward_active_era(vec![(11, 1)]); + + Eras::::set_validator_prefs(1, &11, ValidatorPrefs { commission, ..Default::default() }); + Session::roll_until_active_era(2); + let _ = staking_events_since_last_call(); + + mock::make_all_reward_payment(1); + + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::PayoutStarted { era_index: 1, validator_stash: 11, page: 0, next: None }, + Event::Rewarded { stash: 11, dest: RewardDestination::Staked, amount: 6600 }, + Event::Rewarded { stash: 101, dest: RewardDestination::Staked, amount: 900 } + ] + ); + }); +} + +#[test] +fn min_commission_works() { + ExtBuilder::default().build_and_execute(|| { + // account 11 controls the stash of itself. + assert_ok!(Staking::validate( + RuntimeOrigin::signed(11), + ValidatorPrefs { commission: Perbill::from_percent(5), blocked: false } + )); + + // event emitted should be correct + assert_eq!( + *staking_events().last().unwrap(), + Event::ValidatorPrefsSet { + stash: 11, + prefs: ValidatorPrefs { commission: Perbill::from_percent(5), blocked: false } + } + ); + + assert_ok!(Staking::set_staking_configs( + RuntimeOrigin::root(), + ConfigOp::Remove, + ConfigOp::Remove, + ConfigOp::Remove, + ConfigOp::Remove, + ConfigOp::Remove, + ConfigOp::Set(Perbill::from_percent(10)), + ConfigOp::Noop, + )); + + // can't make it less than 10 now + assert_noop!( + Staking::validate( + RuntimeOrigin::signed(11), + ValidatorPrefs { commission: Perbill::from_percent(5), blocked: false } + ), + Error::::CommissionTooLow + ); + + // can only change to higher. 
+ assert_ok!(Staking::validate( + RuntimeOrigin::signed(11), + ValidatorPrefs { commission: Perbill::from_percent(10), blocked: false } + )); + + assert_ok!(Staking::validate( + RuntimeOrigin::signed(11), + ValidatorPrefs { commission: Perbill::from_percent(15), blocked: false } + )); + }) +} + +#[test] +fn set_min_commission_works_with_admin_origin() { + ExtBuilder::default().build_and_execute(|| { + // no minimum commission set initially + assert_eq!(MinCommission::::get(), Zero::zero()); + + // root can set min commission + assert_ok!(Staking::set_min_commission(RuntimeOrigin::root(), Perbill::from_percent(10))); + + assert_eq!(MinCommission::::get(), Perbill::from_percent(10)); + + // Non privileged origin can not set min_commission + assert_noop!( + Staking::set_min_commission(RuntimeOrigin::signed(2), Perbill::from_percent(15)), + BadOrigin + ); + + // Admin Origin can set min commission + assert_ok!(Staking::set_min_commission( + RuntimeOrigin::signed(1), + Perbill::from_percent(15), + )); + + // setting commission below min_commission fails + assert_noop!( + Staking::validate( + RuntimeOrigin::signed(11), + ValidatorPrefs { commission: Perbill::from_percent(14), blocked: false } + ), + Error::::CommissionTooLow + ); + + // setting commission >= min_commission works + assert_ok!(Staking::validate( + RuntimeOrigin::signed(11), + ValidatorPrefs { commission: Perbill::from_percent(15), blocked: false } + )); + }) +} + +#[test] +fn force_apply_min_commission_works() { + let prefs = |c| ValidatorPrefs { commission: Perbill::from_percent(c), blocked: false }; + let validators = || Validators::::iter().collect::>(); + ExtBuilder::default().build_and_execute(|| { + assert_ok!(Staking::validate(RuntimeOrigin::signed(31), prefs(10))); + assert_ok!(Staking::validate(RuntimeOrigin::signed(21), prefs(5))); + + // Given + assert_eq!(validators(), vec![(31, prefs(10)), (21, prefs(5)), (11, prefs(0))]); + MinCommission::::set(Perbill::from_percent(5)); + + // When applying 
to a commission greater than min + assert_ok!(Staking::force_apply_min_commission(RuntimeOrigin::signed(1), 31)); + // Then the commission is not changed + assert_eq!(validators(), vec![(31, prefs(10)), (21, prefs(5)), (11, prefs(0))]); + + // When applying to a commission that is equal to min + assert_ok!(Staking::force_apply_min_commission(RuntimeOrigin::signed(1), 21)); + // Then the commission is not changed + assert_eq!(validators(), vec![(31, prefs(10)), (21, prefs(5)), (11, prefs(0))]); + + // When applying to a commission that is less than the min + assert_ok!(Staking::force_apply_min_commission(RuntimeOrigin::signed(1), 11)); + // Then the commission is bumped to the min + assert_eq!(validators(), vec![(31, prefs(10)), (21, prefs(5)), (11, prefs(5))]); + + // When applying commission to a validator that doesn't exist then storage is not altered + assert_noop!( + Staking::force_apply_min_commission(RuntimeOrigin::signed(1), 420), + Error::::NotStash + ); + }); +} + +#[test] +fn claim_reward_at_the_last_era_and_no_double_claim_and_invalid_claim() { + // should check that: + // * rewards get paid until history_depth for both validators and nominators + // * an invalid era to claim doesn't update last_reward + // * double claim of one era fails + ExtBuilder::default().nominate(true).build_and_execute(|| { + // Consumed weight for all payout_stakers dispatches that fail + let err_weight = ::WeightInfo::payout_stakers_alive_staked(0); + + // Check state + Payee::::insert(11, RewardDestination::Account(11)); + Payee::::insert(101, RewardDestination::Account(101)); + + // reward for era 1 + Pallet::::reward_by_ids(vec![(11, 1)]); + + Session::roll_until_active_era(2); + + // reward for era 2 + Pallet::::reward_by_ids(vec![(11, 1)]); + + Session::roll_until_active_era(3); + + // reward for era 3 + Pallet::::reward_by_ids(vec![(11, 1)]); + + // go to the history depth era + Session::roll_until_active_era(HistoryDepth::get() + 1); + let _ = 
staking_events_since_last_call(); + + // Last kept is 1: + assert_noop!( + Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 0, 0), + // Fail: Era out of history + Error::::InvalidEraToReward.with_weight(err_weight) + ); + + assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 1, 0)); + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::PayoutStarted { era_index: 1, validator_stash: 11, page: 0, next: None }, + Event::Rewarded { stash: 11, dest: RewardDestination::Account(11), amount: 6000 }, + Event::Rewarded { stash: 101, dest: RewardDestination::Account(101), amount: 1500 } + ] + ); + + assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 2, 0)); + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::PayoutStarted { era_index: 2, validator_stash: 11, page: 0, next: None }, + Event::Rewarded { stash: 11, dest: RewardDestination::Account(11), amount: 6000 }, + Event::Rewarded { stash: 101, dest: RewardDestination::Account(101), amount: 1500 } + ] + ); + + assert_noop!( + Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 2, 0), + // Fail: Double claim + Error::::AlreadyClaimed.with_weight(err_weight) + ); + + assert_noop!( + Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, active_era(), 0), + // Fail: Era ongoing + Error::::InvalidEraToReward.with_weight(err_weight) + ); + }); +} + +#[test] +fn nominators_over_max_exposure_page_size_are_rewarded() { + ExtBuilder::default().build_and_execute(|| { + // bond one nominator more than the max exposure page size to validator 11 in era 1 + for i in 0..=MaxExposurePageSize::get() { + let stash = 10_000 + i as AccountId; + let balance = 10_000 + i as Balance; + asset::set_stakeable_balance::(&stash, balance); + assert_ok!(Staking::bond( + RuntimeOrigin::signed(stash), + balance, + RewardDestination::Stash + )); + assert_ok!(Staking::nominate(RuntimeOrigin::signed(stash), vec![11])); + } + + // enact new 
staker set -- era 2 + Session::roll_until_active_era(2); + + // reward for era 2 + Pallet::::reward_by_ids(vec![(11, 1)]); + + Session::roll_until_active_era(3); + mock::make_all_reward_payment(2); + + // Assert nominators from 1 to Max are rewarded + let mut i: u32 = 0; + while i < MaxExposurePageSize::get() { + let stash = 10_000 + i as AccountId; + let balance = 10_000 + i as Balance; + assert!(asset::stakeable_balance::(&stash) > balance); + i += 1; + } + + // Assert overflowing nominators from page 1 are also rewarded + let stash = 10_000 + i as AccountId; + assert!(asset::stakeable_balance::(&stash) > (10_000 + i) as Balance); + }); +} + +#[test] +fn test_nominators_are_rewarded_for_all_exposure_page() { + ExtBuilder::default().build_and_execute(|| { + // 3 pages of exposure + let nominator_count = 2 * MaxExposurePageSize::get() + 1; + + for i in 0..nominator_count { + let stash = 10_000 + i as AccountId; + let balance = 10_000 + i as Balance; + asset::set_stakeable_balance::(&stash, balance); + assert_ok!(Staking::bond( + RuntimeOrigin::signed(stash), + balance, + RewardDestination::Stash + )); + assert_ok!(Staking::nominate(RuntimeOrigin::signed(stash), vec![11])); + } + + // enact + Session::roll_until_active_era(2); + + // give rewards + Pallet::::reward_by_ids(vec![(11, 1)]); + + Session::roll_until_active_era(3); + mock::make_all_reward_payment(2); + + assert_eq!(Eras::::exposure_page_count(2, &11), 3); + + // Assert all nominators are rewarded according to their stake + for i in 0..nominator_count { + // balance of the nominator after the reward payout. + let current_balance = asset::stakeable_balance::(&((10000 + i) as AccountId)); + // balance of the nominator in the previous iteration. + let previous_balance = asset::stakeable_balance::(&((10000 + i - 1) as AccountId)); + // balance before the reward. 
+ let original_balance = 10_000 + i as Balance; + + assert!(current_balance > original_balance); + // since the stake of the nominator is increasing for each iteration, the final balance + // after the reward should also be higher than the previous iteration. + assert!(current_balance > previous_balance); + } + }); +} + +#[test] +fn test_multi_page_payout_stakers_by_page() { + ExtBuilder::default().has_stakers(false).build_and_execute(|| { + let balance = 1000; + // Track the exposure of the validator and all nominators. + let mut total_exposure = balance; + + // Create a validator: + bond_validator(11, balance); // Default(64) + assert_eq!(Validators::::count(), 1); + + // Create nominators, targeting stash of validators + for i in 0..100 { + let bond_amount = balance + i as Balance; + bond_nominator(1000 + i, bond_amount, vec![11]); + // with multi page reward payout, payout exposure is same as total exposure. + total_exposure += bond_amount; + } + + // enact the above changes + Session::roll_until_active_era(2); + // give rewards + Staking::reward_by_ids(vec![(11, 1)]); + + // 100 nominators fit into 2 pages of exposure + assert_eq!(MaxExposurePageSize::get(), 64); + assert_eq!(Eras::::exposure_page_count(2, &11), 2); + + // compute and ensure the reward amount is greater than zero. + let payout = validator_payout_for(time_per_era()); + Session::roll_until_active_era(3); + + // verify the exposures are calculated correctly. 
+ let actual_exposure_0 = Eras::::get_paged_exposure(2, &11, 0).unwrap(); + assert_eq!(actual_exposure_0.total(), total_exposure); + assert_eq!(actual_exposure_0.own(), 1000); + assert_eq!(actual_exposure_0.others().len(), 64); + + let actual_exposure_1 = Eras::::get_paged_exposure(2, &11, 1).unwrap(); + assert_eq!(actual_exposure_1.total(), total_exposure); + // own stake is only included once in the first page + assert_eq!(actual_exposure_1.own(), 0); + assert_eq!(actual_exposure_1.others().len(), 100 - 64); + + let pre_payout_total_issuance = pallet_balances::TotalIssuance::::get(); + RewardOnUnbalanceWasCalled::set(false); + + // flush any events + let _ = staking_events_since_last_call(); + + let controller_balance_before_p0_payout = asset::stakeable_balance::(&11); + // Payout rewards for first exposure page + assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 2, 0)); + + // verify `Rewarded` events are being executed + assert!(matches!( + staking_events_since_last_call().as_slice(), + &[ + Event::PayoutStarted { era_index: 2, validator_stash: 11, page: 0, next: Some(1) }, + .., + Event::Rewarded { stash: 1063, dest: RewardDestination::Stash, amount: _ }, + Event::Rewarded { stash: 1064, dest: RewardDestination::Stash, amount: _ }, + ] + )); + + let controller_balance_after_p0_payout = asset::stakeable_balance::(&11); + + // verify rewards have been paid out but still some left + assert!(pallet_balances::TotalIssuance::::get() > pre_payout_total_issuance); + assert!(pallet_balances::TotalIssuance::::get() < pre_payout_total_issuance + payout); + + // verify the validator has been rewarded + assert!(controller_balance_after_p0_payout > controller_balance_before_p0_payout); + + // Payout the second and last page of nominators + assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 2, 1)); + + // verify `Rewarded` events are being executed for the second page. 
+ let events = staking_events_since_last_call(); + assert!(matches!( + events.as_slice(), + &[ + Event::PayoutStarted { era_index: 2, validator_stash: 11, page: 1, next: None }, + Event::Rewarded { stash: 1065, dest: RewardDestination::Stash, amount: _ }, + Event::Rewarded { stash: 1066, dest: RewardDestination::Stash, amount: _ }, + .. + ] + )); + + // verify the validator was not rewarded the second time + assert_eq!(asset::stakeable_balance::(&11), controller_balance_after_p0_payout); + + // verify all rewards have been paid out + assert_eq_error_rate!( + pallet_balances::TotalIssuance::::get(), + pre_payout_total_issuance + payout, + 2 + ); + assert!(RewardOnUnbalanceWasCalled::get()); + + // Top 64 nominators of validator 11 automatically paid out, including the validator + assert!(asset::stakeable_balance::(&11) > balance); + for i in 0..100 { + assert!(asset::stakeable_balance::(&(1000 + i)) > balance + i as Balance); + } + + // verify rewards are tracked to prevent double claims + for page in 0..Eras::::exposure_page_count(2, &11) { + assert_eq!(Eras::::is_rewards_claimed(2, &11, page), true); + } + + for i in 4..17 { + Staking::reward_by_ids(vec![(11, 1)]); + + // compute and ensure the reward amount is greater than zero. 
+ let payout = validator_payout_for(time_per_era()); + let pre_payout_total_issuance = pallet_balances::TotalIssuance::::get(); + + Session::roll_until_active_era(i); + RewardOnUnbalanceWasCalled::set(false); + mock::make_all_reward_payment(i - 1); + assert_eq_error_rate!( + pallet_balances::TotalIssuance::::get(), + pre_payout_total_issuance + payout, + 2 + ); + assert!(RewardOnUnbalanceWasCalled::get()); + + // verify we track rewards for each era and page + for page in 0..Eras::::exposure_page_count(i - 1, &11) { + assert_eq!(Eras::::is_rewards_claimed(i - 1, &11, page), true); + } + } + + assert_eq!(ErasClaimedRewards::::get(14, &11), vec![0, 1]); + + let last_era = 99; + let history_depth = HistoryDepth::get(); + let last_reward_era = last_era - 1; + let first_claimable_reward_era = last_era - history_depth; + for i in 17..=last_era { + Staking::reward_by_ids(vec![(11, 1)]); + // compute and ensure the reward amount is greater than zero. + let _ = validator_payout_for(time_per_era()); + Session::roll_until_active_era(i); + } + + // verify we clean up history as we go + for era in 0..15 { + assert!(ErasClaimedRewards::::get(era, &11).is_empty()); + } + + // verify only page 0 is marked as claimed + assert_ok!(Staking::payout_stakers_by_page( + RuntimeOrigin::signed(1337), + 11, + first_claimable_reward_era, + 0 + )); + assert_eq!(ErasClaimedRewards::::get(first_claimable_reward_era, &11), vec![0]); + + // verify page 0 and 1 are marked as claimed + assert_ok!(Staking::payout_stakers_by_page( + RuntimeOrigin::signed(1337), + 11, + first_claimable_reward_era, + 1 + )); + assert_eq!(ErasClaimedRewards::::get(first_claimable_reward_era, &11), vec![0, 1]); + + // verify only page 0 is marked as claimed + assert_ok!(Staking::payout_stakers_by_page( + RuntimeOrigin::signed(1337), + 11, + last_reward_era, + 0 + )); + assert_eq!(ErasClaimedRewards::::get(last_reward_era, &11), vec![0]); + + // verify page 0 and 1 are marked as claimed + 
assert_ok!(Staking::payout_stakers_by_page( + RuntimeOrigin::signed(1337), + 11, + last_reward_era, + 1 + )); + assert_eq!(ErasClaimedRewards::::get(last_reward_era, &11), vec![0, 1]); + + // Out of order claims works. + assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 69, 0)); + assert_eq!(ErasClaimedRewards::::get(69, &11), vec![0]); + assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 23, 1)); + assert_eq!(ErasClaimedRewards::::get(23, &11), vec![1]); + assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 42, 0)); + assert_eq!(ErasClaimedRewards::::get(42, &11), vec![0]); + }); +} + +#[test] +fn test_multi_page_payout_stakers_backward_compatible() { + ExtBuilder::default().has_stakers(false).build_and_execute(|| { + let balance = 1000; + // Track the exposure of the validator and all nominators. + let mut total_exposure = balance; + // Create a validator: + bond_validator(11, balance); // Default(64) + assert_eq!(Validators::::count(), 1); + + let err_weight = ::WeightInfo::payout_stakers_alive_staked(0); + + // Create nominators, targeting stash of validators + for i in 0..100 { + let bond_amount = balance + i as Balance; + bond_nominator(1000 + i, bond_amount, vec![11]); + // with multi page reward payout, payout exposure is same as total exposure. + total_exposure += bond_amount; + } + + Session::roll_until_active_era(2); + Staking::reward_by_ids(vec![(11, 1)]); + + // Since `MaxExposurePageSize = 64`, there are two pages of validator exposure. + assert_eq!(Eras::::exposure_page_count(2, &11), 2); + + // compute and ensure the reward amount is greater than zero. + let payout = validator_payout_for(time_per_era()); + Session::roll_until_active_era(3); + + // verify the exposures are calculated correctly. 
+ let actual_exposure_0 = Eras::::get_paged_exposure(2, &11, 0).unwrap(); + assert_eq!(actual_exposure_0.total(), total_exposure); + assert_eq!(actual_exposure_0.own(), 1000); + assert_eq!(actual_exposure_0.others().len(), 64); + + let actual_exposure_1 = Eras::::get_paged_exposure(2, &11, 1).unwrap(); + assert_eq!(actual_exposure_1.total(), total_exposure); + // own stake is only included once in the first page + assert_eq!(actual_exposure_1.own(), 0); + assert_eq!(actual_exposure_1.others().len(), 100 - 64); + + let pre_payout_total_issuance = pallet_balances::TotalIssuance::::get(); + RewardOnUnbalanceWasCalled::set(false); + + let controller_balance_before_p0_payout = asset::stakeable_balance::(&11); + // Payout rewards for first exposure page + assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 2)); + // page 0 is claimed + assert_noop!( + Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 2, 0), + Error::::AlreadyClaimed.with_weight(err_weight) + ); + + let controller_balance_after_p0_payout = asset::stakeable_balance::(&11); + + // verify rewards have been paid out but still some left + assert!(pallet_balances::TotalIssuance::::get() > pre_payout_total_issuance); + assert!(pallet_balances::TotalIssuance::::get() < pre_payout_total_issuance + payout); + + // verify the validator has been rewarded + assert!(controller_balance_after_p0_payout > controller_balance_before_p0_payout); + + // This should payout the second and last page of nominators + assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 2)); + + // cannot claim any more pages + assert_noop!( + Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 2), + Error::::AlreadyClaimed.with_weight(err_weight) + ); + + // verify the validator was not rewarded the second time + assert_eq!(asset::stakeable_balance::(&11), controller_balance_after_p0_payout); + + // verify all rewards have been paid out + assert_eq_error_rate!( + 
pallet_balances::TotalIssuance::::get(), + pre_payout_total_issuance + payout, + 2 + ); + assert!(RewardOnUnbalanceWasCalled::get()); + + // verify all nominators of validator 11 are paid out, including the validator + // Validator payout goes to controller. + assert!(asset::stakeable_balance::(&11) > balance); + for i in 0..100 { + assert!(asset::stakeable_balance::(&(1000 + i)) > balance + i as Balance); + } + + // verify rewards are tracked to prevent double claims + for page in 0..Eras::::exposure_page_count(2, &11) { + assert_eq!(Eras::::is_rewards_claimed(2, &11, page), true); + } + + for i in 4..17 { + Staking::reward_by_ids(vec![(11, 1)]); + + // compute and ensure the reward amount is greater than zero. + let payout = validator_payout_for(time_per_era()); + let pre_payout_total_issuance = pallet_balances::TotalIssuance::::get(); + + Session::roll_until_active_era(i); + RewardOnUnbalanceWasCalled::set(false); + mock::make_all_reward_payment(i - 1); + assert_eq_error_rate!( + pallet_balances::TotalIssuance::::get(), + pre_payout_total_issuance + payout, + 2 + ); + assert!(RewardOnUnbalanceWasCalled::get()); + + // verify we track rewards for each era and page + for page in 0..Eras::::exposure_page_count(i - 1, &11) { + assert_eq!(Eras::::is_rewards_claimed(i - 1, &11, page), true); + } + } + + assert_eq!(ErasClaimedRewards::::get(14, &11), vec![0, 1]); + + let last_era = 99; + let history_depth = HistoryDepth::get(); + let last_reward_era = last_era - 1; + let first_claimable_reward_era = last_era - history_depth; + for i in 17..=last_era { + Staking::reward_by_ids(vec![(11, 1)]); + // compute and ensure the reward amount is greater than zero. 
+ let _ = validator_payout_for(time_per_era()); + Session::roll_until_active_era(i); + } + + // verify we clean up history as we go + for era in 0..15 { + assert!(ErasClaimedRewards::::get(era, &11).is_empty()); + } + + // verify only page 0 is marked as claimed + assert_ok!(Staking::payout_stakers( + RuntimeOrigin::signed(1337), + 11, + first_claimable_reward_era + )); + assert_eq!(ErasClaimedRewards::::get(first_claimable_reward_era, &11), vec![0]); + + // verify page 0 and 1 are marked as claimed + assert_ok!(Staking::payout_stakers( + RuntimeOrigin::signed(1337), + 11, + first_claimable_reward_era, + )); + assert_eq!(ErasClaimedRewards::::get(first_claimable_reward_era, &11), vec![0, 1]); + + // change order and verify only page 1 is marked as claimed + assert_ok!(Staking::payout_stakers_by_page( + RuntimeOrigin::signed(1337), + 11, + last_reward_era, + 1 + )); + assert_eq!(ErasClaimedRewards::::get(last_reward_era, &11), vec![1]); + + // verify page 0 is claimed even when explicit page is not passed + assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, last_reward_era,)); + + assert_eq!(ErasClaimedRewards::::get(last_reward_era, &11), vec![1, 0]); + + // cannot claim any more pages + assert_noop!( + Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, last_reward_era), + Error::::AlreadyClaimed.with_weight(err_weight) + ); + + // Create 4 nominator pages + for i in 100..200 { + let bond_amount = balance + i as Balance; + bond_nominator(1000 + i, bond_amount, vec![11]); + } + + let test_era = last_era + 1; + Session::roll_until_active_era(test_era); + + Staking::reward_by_ids(vec![(11, 1)]); + // compute and ensure the reward amount is greater than zero. + let _ = validator_payout_for(time_per_era()); + + Session::roll_until_active_era(test_era + 1); + + // Out of order claims works. 
+ assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, test_era, 2)); + assert_eq!(ErasClaimedRewards::::get(test_era, &11), vec![2]); + + assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, test_era)); + assert_eq!(ErasClaimedRewards::::get(test_era, &11), vec![2, 0]); + + // cannot claim page 2 again + assert_noop!( + Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, test_era, 2), + Error::::AlreadyClaimed.with_weight(err_weight) + ); + + assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, test_era)); + assert_eq!(ErasClaimedRewards::::get(test_era, &11), vec![2, 0, 1]); + + assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, test_era)); + assert_eq!(ErasClaimedRewards::::get(test_era, &11), vec![2, 0, 1, 3]); + }); +} + +#[test] +fn test_page_count_and_size() { + ExtBuilder::default().has_stakers(false).build_and_execute(|| { + let balance = 1000; + // Track the exposure of the validator and all nominators. + // Create a validator: + bond_validator(11, balance); // Default(64) + assert_eq!(Validators::::count(), 1); + + // Create nominators, targeting stash of validators + for i in 0..100 { + let bond_amount = balance + i as Balance; + bond_nominator(1000 + i, bond_amount, vec![11]); + } + + Session::roll_until_active_era(2); + + // Since max exposure page size is 64, 2 pages of nominators are created. + assert_eq!(MaxExposurePageSize::get(), 64); + assert_eq!(Eras::::exposure_page_count(2, &11), 2); + + // first page has 64 nominators + assert_eq!(Eras::::get_paged_exposure(2, &11, 0).unwrap().others().len(), 64); + // second page has 36 nominators + assert_eq!(Eras::::get_paged_exposure(2, &11, 1).unwrap().others().len(), 36); + + // now lets decrease page size + MaxExposurePageSize::set(32); + + Session::roll_until_active_era(3); + + // now we expect 4 pages. 
+ assert_eq!(Eras::::exposure_page_count(3, &11), 4); + // first 3 pages have 32 nominators each + assert_eq!(Eras::::get_paged_exposure(3, &11, 0).unwrap().others().len(), 32); + assert_eq!(Eras::::get_paged_exposure(3, &11, 1).unwrap().others().len(), 32); + assert_eq!(Eras::::get_paged_exposure(3, &11, 2).unwrap().others().len(), 32); + assert_eq!(Eras::::get_paged_exposure(3, &11, 3).unwrap().others().len(), 4); + + // now lets decrease page size even more + MaxExposurePageSize::set(5); + Session::roll_until_active_era(4); + + // now we expect the max 20 pages (100/5). + assert_eq!(Eras::::exposure_page_count(4, &11), 20); + }); +} + +#[test] +fn payout_stakers_handles_basic_errors() { + ExtBuilder::default().has_stakers(false).build_and_execute(|| { + let err_weight = ::WeightInfo::payout_stakers_alive_staked(0); + + // Same setup as the test above + let balance = 1000; + bond_validator(11, balance); // Default(64) + + // Create nominators, targeting stash + for i in 0..100 { + bond_nominator(1000 + i, balance + i as Balance, vec![11]); + } + + Session::roll_until_active_era(2); + Staking::reward_by_ids(vec![(11, 1)]); + let _ = validator_payout_for(time_per_era()); + Session::roll_until_active_era(3); + + // Wrong Era, too big + assert_noop!( + Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 3, 0), + Error::::InvalidEraToReward.with_weight(err_weight) + ); + // Wrong Staker + assert_noop!( + Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 10, 2, 0), + Error::::NotStash.with_weight(err_weight) + ); + + let last_era = 99; + for i in 4..=last_era { + Staking::reward_by_ids(vec![(11, 1)]); + // compute and ensure the reward amount is greater than zero. + let _ = validator_payout_for(time_per_era()); + Session::roll_until_active_era(i); + } + + let history_depth = HistoryDepth::get(); + let expected_last_reward_era = last_era - 1; + let expected_start_reward_era = last_era - history_depth; + + // We are at era last_era=99. 
Given history_depth=80, we should be able + // to payout era starting from expected_start_reward_era=19 through + // expected_last_reward_era=98 (80 total eras), but not 18 or 99. + assert_noop!( + Staking::payout_stakers_by_page( + RuntimeOrigin::signed(1337), + 11, + expected_start_reward_era - 1, + 0 + ), + Error::::InvalidEraToReward.with_weight(err_weight) + ); + assert_noop!( + Staking::payout_stakers_by_page( + RuntimeOrigin::signed(1337), + 11, + expected_last_reward_era + 1, + 0 + ), + Error::::InvalidEraToReward.with_weight(err_weight) + ); + assert_ok!(Staking::payout_stakers_by_page( + RuntimeOrigin::signed(1337), + 11, + expected_start_reward_era, + 0 + )); + assert_ok!(Staking::payout_stakers_by_page( + RuntimeOrigin::signed(1337), + 11, + expected_last_reward_era, + 0 + )); + + // can call page 1 + assert_ok!(Staking::payout_stakers_by_page( + RuntimeOrigin::signed(1337), + 11, + expected_last_reward_era, + 1 + )); + + // Can't claim again + assert_noop!( + Staking::payout_stakers_by_page( + RuntimeOrigin::signed(1337), + 11, + expected_start_reward_era, + 0 + ), + Error::::AlreadyClaimed.with_weight(err_weight) + ); + + assert_noop!( + Staking::payout_stakers_by_page( + RuntimeOrigin::signed(1337), + 11, + expected_last_reward_era, + 0 + ), + Error::::AlreadyClaimed.with_weight(err_weight) + ); + + assert_noop!( + Staking::payout_stakers_by_page( + RuntimeOrigin::signed(1337), + 11, + expected_last_reward_era, + 1 + ), + Error::::AlreadyClaimed.with_weight(err_weight) + ); + + // invalid page + assert_noop!( + Staking::payout_stakers_by_page( + RuntimeOrigin::signed(1337), + 11, + expected_last_reward_era, + 2 + ), + Error::::InvalidPage.with_weight(err_weight) + ); + }); +} + +#[test] +fn test_commission_paid_across_pages() { + ExtBuilder::default().has_stakers(false).build_and_execute(|| { + let balance = 1; + let commission = 50; + + // Create a validator: + bond_validator(11, balance); + assert_ok!(Staking::validate( + RuntimeOrigin::signed(11), 
+ ValidatorPrefs { commission: Perbill::from_percent(commission), blocked: false } + )); + assert_eq!(Validators::::count(), 1); + + // Create nominators, targeting stash of validators + for i in 0..200 { + let bond_amount = balance + i as Balance; + bond_nominator(1000 + i, bond_amount, vec![11]); + } + + Session::roll_until_active_era(2); + Staking::reward_by_ids(vec![(11, 1)]); + + // Since `MaxExposurePageSize = 64`, there are four pages of validator + // exposure. + assert_eq!(Eras::::exposure_page_count(2, &11), 4); + + // compute and ensure the reward amount is greater than zero. + let payout = validator_payout_for(time_per_era()); + Session::roll_until_active_era(3); + + let initial_balance = asset::stakeable_balance::(&11); + // Payout rewards for first exposure page + assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 2, 0)); + + let controller_balance_after_p0_payout = asset::stakeable_balance::(&11); + + // some commission is paid + assert!(initial_balance < controller_balance_after_p0_payout); + + // payout all pages + for i in 1..4 { + let before_balance = asset::stakeable_balance::(&11); + assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 2, i)); + let after_balance = asset::stakeable_balance::(&11); + // some commission is paid for every page + assert!(before_balance < after_balance); + } + + assert_eq_error_rate!(asset::stakeable_balance::(&11), initial_balance + payout / 2, 1,); + }); +} + +#[test] +fn payout_stakers_handles_weight_refund() { + // Note: this test relies on the assumption that `payout_stakers_alive_staked` is solely used by + // `payout_stakers` to calculate the weight of each payout op. + ExtBuilder::default().has_stakers(false).build_and_execute(|| { + use crate::Call as StakingCall; + let max_nom_rewarded = MaxExposurePageSize::get(); + + // Make sure the configured value is meaningful for our use. 
+ assert!(max_nom_rewarded >= 4); + let half_max_nom_rewarded = max_nom_rewarded / 2; + + // Sanity check our max and half max nominator quantities. + assert!(half_max_nom_rewarded > 0); + assert!(max_nom_rewarded > half_max_nom_rewarded); + + let max_nom_rewarded_weight = + ::WeightInfo::payout_stakers_alive_staked(max_nom_rewarded); + let half_max_nom_rewarded_weight = + ::WeightInfo::payout_stakers_alive_staked(half_max_nom_rewarded); + let zero_nom_payouts_weight = ::WeightInfo::payout_stakers_alive_staked(0); + + assert!(zero_nom_payouts_weight.any_gt(Weight::zero())); + assert!(half_max_nom_rewarded_weight.any_gt(zero_nom_payouts_weight)); + assert!(max_nom_rewarded_weight.any_gt(half_max_nom_rewarded_weight)); + + let balance = 1000; + bond_validator(11, balance); + + // Era 2 + Session::roll_until_active_era(2); + + // Reward just the validator. + Staking::reward_by_ids(vec![(11, 1)]); + + // Add some `half_max_nom_rewarded` nominators who will start backing the validator in the + // next era. + for i in 0..half_max_nom_rewarded { + bond_nominator((1000 + i).into(), balance + i as Balance, vec![11]); + } + + // Era 3 + Session::roll_until_active_era(3); + + // Collect payouts when there are no nominators + let call = RuntimeCall::Staking(StakingCall::payout_stakers_by_page { + validator_stash: 11, + era: 2, + page: 0, + }); + let info = call.get_dispatch_info(); + let result = call.dispatch(RuntimeOrigin::signed(20)); + + assert_ok!(result); + assert_eq!(extract_actual_weight(&result, &info), zero_nom_payouts_weight); + + // The validator is not rewarded in this era; so there will be zero payouts to claim for + // this era. + + // next era -- with nominators now + Session::roll_until_active_era(4); + + // Collect payouts for an era where the validator did not receive any points. 
+ let call = RuntimeCall::Staking(StakingCall::payout_stakers_by_page { + validator_stash: 11, + era: 3, + page: 0, + }); + let info = call.get_dispatch_info(); + let result = call.dispatch(RuntimeOrigin::signed(20)); + assert_ok!(result); + assert_eq!(extract_actual_weight(&result, &info), zero_nom_payouts_weight); + + // Reward the validator and its nominators. + Staking::reward_by_ids(vec![(11, 1)]); + + // Era 5 + Session::roll_until_active_era(5); + + // Collect payouts when the validator has `half_max_nom_rewarded` nominators. + let call = RuntimeCall::Staking(StakingCall::payout_stakers_by_page { + validator_stash: 11, + era: 4, + page: 0, + }); + let info = call.get_dispatch_info(); + let result = call.dispatch(RuntimeOrigin::signed(20)); + assert_ok!(result); + assert_eq!(extract_actual_weight(&result, &info), half_max_nom_rewarded_weight); + + // Add enough nominators so that we are at the limit. They will be active nominators + // in the next era. + for i in half_max_nom_rewarded..max_nom_rewarded { + bond_nominator((1000 + i).into(), balance + i as Balance, vec![11]); + } + + // Era 6 + Session::roll_until_active_era(6); + + // We now have `max_nom_rewarded` nominators actively nominating our validator. + // Reward the validator so we can collect for everyone in the next era. + Staking::reward_by_ids(vec![(11, 1)]); + + // Era 7 + Session::roll_until_active_era(7); + + // Collect payouts when the validator had `half_max_nom_rewarded` nominators. + let call = RuntimeCall::Staking(StakingCall::payout_stakers_by_page { + validator_stash: 11, + era: 6, + page: 0, + }); + let info = call.get_dispatch_info(); + let result = call.dispatch(RuntimeOrigin::signed(20)); + assert_ok!(result); + assert_eq!(extract_actual_weight(&result, &info), max_nom_rewarded_weight); + + // Try and collect payouts for an era that has already been collected. 
+ let call = RuntimeCall::Staking(StakingCall::payout_stakers_by_page { + validator_stash: 11, + era: 6, + page: 0, + }); + let info = call.get_dispatch_info(); + let result = call.dispatch(RuntimeOrigin::signed(20)); + assert!(result.is_err()); + // When there is an error the consumed weight == weight when there are 0 nominator payouts. + assert_eq!(extract_actual_weight(&result, &info), zero_nom_payouts_weight); + }); +} + +#[test] +fn test_runtime_api_pending_rewards() { + ExtBuilder::default().build_and_execute(|| { + // GIVEN + let err_weight = ::WeightInfo::payout_stakers_alive_staked(0); + let stake = 100; + + // validator with non-paged exposure, rewards marked in legacy claimed rewards. + let validator_one = 301; + // validator with non-paged exposure, rewards marked in paged claimed rewards. + let validator_two = 302; + // validator with paged exposure. + let validator_three = 303; + + // Set staker + for v in validator_one..=validator_three { + let _ = asset::set_stakeable_balance::(&v, stake); + assert_ok!(Staking::bond(RuntimeOrigin::signed(v), stake, RewardDestination::Staked)); + } + + // Add reward points + let reward = EraRewardPoints:: { + total: 1, + individual: vec![(validator_one, 1), (validator_two, 1), (validator_three, 1)] + .into_iter() + .collect(), + }; + ErasRewardPoints::::insert(0, reward); + + // build exposure + let mut individual_exposures: Vec> = vec![]; + for i in 0..=MaxExposurePageSize::get() { + individual_exposures.push(IndividualExposure { who: i.into(), value: stake }); + } + let exposure = Exposure:: { + total: stake * (MaxExposurePageSize::get() as Balance + 2), + own: stake, + others: individual_exposures, + }; + + // add exposure for validators + Eras::::upsert_exposure(0, &validator_one, exposure.clone()); + Eras::::upsert_exposure(0, &validator_two, exposure.clone()); + + // add some reward to be distributed + ErasValidatorReward::::insert(0, 1000); + + // SCENARIO: Validator with paged exposure (two pages). 
+ // validators have not claimed rewards, so pending rewards is true. + assert!(Eras::::pending_rewards(0, &validator_one)); + assert!(Eras::::pending_rewards(0, &validator_two)); + // and payout works + assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), validator_one, 0)); + assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), validator_two, 0)); + // validators have two pages of exposure, so pending rewards is still true. + assert!(Eras::::pending_rewards(0, &validator_one)); + assert!(Eras::::pending_rewards(0, &validator_two)); + // payout again only for validator one + assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), validator_one, 0)); + // now pending rewards is false for validator one + assert!(!Eras::::pending_rewards(0, &validator_one)); + // and payout fails for validator one + assert_noop!( + Staking::payout_stakers(RuntimeOrigin::signed(1337), validator_one, 0), + Error::::AlreadyClaimed.with_weight(err_weight) + ); + // while pending reward is true for validator two + assert!(Eras::::pending_rewards(0, &validator_two)); + // and payout works again for validator two. + assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), validator_two, 0)); + }); +} diff --git a/substrate/frame/staking-async/src/tests/slashing.rs b/substrate/frame/staking-async/src/tests/slashing.rs new file mode 100644 index 0000000000000..0e96c6e97305f --- /dev/null +++ b/substrate/frame/staking-async/src/tests/slashing.rs @@ -0,0 +1,1696 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::*; +use crate::{session_rotation::Eras, slashing}; +use pallet_staking_async_rc_client as rc_client; +use sp_runtime::{Perquintill, Rounding}; +use sp_staking::StakingInterface; + +#[test] +fn nominators_also_get_slashed_pro_rata() { + ExtBuilder::default() + .validator_count(4) + .set_status(41, StakerStatus::Validator) + .build_and_execute(|| { + let initial_exposure = Staking::eras_stakers(active_era(), &11); + assert_eq!( + initial_exposure, + Exposure { + total: 1250, + own: 1000, + others: vec![IndividualExposure { who: 101, value: 250 }] + } + ); + + // staked values; + let nominator_stake = Staking::ledger(101.into()).unwrap().active; + let nominator_balance = asset::stakeable_balance::(&101); + let validator_stake = Staking::ledger(11.into()).unwrap().active; + let validator_balance = asset::stakeable_balance::(&11); + let exposed_stake = initial_exposure.total; + let exposed_validator = initial_exposure.own; + let exposed_nominator = initial_exposure.others.first().unwrap().value; + + // register a slash for 11 with 10%. 
+ add_slash(11); + assert_eq!( + staking_events_since_last_call(), + vec![Event::OffenceReported { + offence_era: 1, + validator: 11, + fraction: Perbill::from_percent(10) + }] + ); + + // roll one block until it is applied + assert_eq!(SlashDeferDuration::get(), 0); + Session::roll_next(); + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::SlashComputed { offence_era: 1, slash_era: 1, offender: 11, page: 0 }, + Event::Slashed { staker: 11, amount: 100 }, + Event::Slashed { staker: 101, amount: 25 } + ] + ); + + // both stakes must have been decreased. + assert!(Staking::ledger(101.into()).unwrap().active < nominator_stake); + assert!(Staking::ledger(11.into()).unwrap().active < validator_stake); + + let slash_amount = Perbill::from_percent(10) * exposed_stake; + let validator_share = + Perbill::from_rational(exposed_validator, exposed_stake) * slash_amount; + let nominator_share = + Perbill::from_rational(exposed_nominator, exposed_stake) * slash_amount; + + // both slash amounts need to be positive for the test to make sense. + assert!(validator_share > 0); + assert!(nominator_share > 0); + + // both stakes must have been decreased pro-rata. + assert_eq!( + Staking::ledger(101.into()).unwrap().active, + nominator_stake - nominator_share + ); + assert_eq!( + Staking::ledger(11.into()).unwrap().active, + validator_stake - validator_share + ); + assert_eq!( + asset::stakeable_balance::(&101), // free balance + nominator_balance - nominator_share, + ); + assert_eq!( + asset::stakeable_balance::(&11), // free balance + validator_balance - validator_share, + ); + }); +} + +#[test] +fn slashing_performed_according_exposure() { + // This test checks that slashing is performed according the exposure (or more precisely, + // historical exposure), not the current balance. + ExtBuilder::default().nominate(false).build_and_execute(|| { + assert_eq!(Staking::eras_stakers(active_era(), &11).own, 1000); + + // Handle an offence with a historical exposure. 
+ add_slash_with_percent(11, 50); + assert_eq!( + staking_events_since_last_call(), + vec![Event::OffenceReported { + offence_era: 1, + validator: 11, + fraction: Perbill::from_percent(50) + }] + ); + + // roll one block until it is applied + assert_eq!(SlashDeferDuration::get(), 0); + + Session::roll_next(); + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::SlashComputed { offence_era: 1, slash_era: 1, offender: 11, page: 0 }, + Event::Slashed { staker: 11, amount: 500 }, + ] + ); + + // The stash account should be slashed for 250 (50% of 500). + assert_eq!(asset::stakeable_balance::(&11), 1000 / 2); + }); +} + +#[test] +fn offence_doesnt_force_new_era() { + ExtBuilder::default().build_and_execute(|| { + assert_eq!(ForceEra::::get(), Forcing::NotForcing); + add_slash(11); + assert_eq!(ForceEra::::get(), Forcing::NotForcing); + }); +} + +#[test] +fn offence_ensures_new_era_without_clobbering() { + ExtBuilder::default().build_and_execute(|| { + assert_ok!(Staking::force_new_era_always(RuntimeOrigin::root())); + assert_eq!(ForceEra::::get(), Forcing::ForceAlways); + + add_slash(11); + + assert_eq!(ForceEra::::get(), Forcing::ForceAlways); + }); +} + +#[test] +fn add_slash_works() { + ExtBuilder::default().nominate(false).build_and_execute(|| { + assert_eq_uvec!(session_validators(), vec![11, 21]); + + add_slash(11); + // roll to apply the slash + Session::roll_next(); + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::OffenceReported { + offence_era: 1, + validator: 11, + fraction: Perbill::from_percent(10) + }, + Event::SlashComputed { offence_era: 1, slash_era: 1, offender: 11, page: 0 }, + Event::Slashed { staker: 11, amount: 100 }, + ] + ); + + // no one is chilled, FYI + assert!(Validators::::contains_key(11)); + }); +} + +#[test] +fn only_first_reporter_receive_the_slice() { + // This test verifies that the first reporter of the offence receive their slice from the + // slashed amount. 
+ ExtBuilder::default().nominate(false).build_and_execute(|| { + // The reporters' reward is calculated from the total exposure. + assert_eq!(Staking::eras_stakers(active_era(), &11).total, 1000); + + let initial_balance_1 = asset::total_balance::(&1); + let initial_balance_2 = asset::total_balance::(&2); + + ::on_new_offences( + session_mock::Session::current_index(), + vec![rc_client::Offence { + offender: 11, + reporters: vec![1, 2], + slash_fraction: Perbill::from_percent(50), + }], + ); + Session::roll_next(); + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::OffenceReported { + offence_era: 1, + validator: 11, + fraction: Perbill::from_percent(50) + }, + Event::SlashComputed { offence_era: 1, slash_era: 1, offender: 11, page: 0 }, + Event::Slashed { staker: 11, amount: 500 }, + ] + ); + + let reward = 500 / 20; + assert_eq!(asset::total_balance::(&1), initial_balance_1 + reward); + // second reporter got nothing + assert_eq!(asset::total_balance::(&2), initial_balance_2); + }); +} + +#[test] +fn subsequent_reports_in_same_span_pay_out_less() { + // This test verifies that the reporters of the offence receive their slice from the slashed + // amount, but less and less if they submit multiple reports in one span. + ExtBuilder::default().nominate(false).build_and_execute(|| { + // The reporters' reward is calculated from the total exposure. 
+ let initial_balance = 1000; + + assert_eq!(Staking::eras_stakers(active_era(), &11).total, initial_balance); + let initial_balance_1 = asset::total_balance::(&1); + + ::on_new_offences( + session_mock::Session::current_index(), + vec![rc_client::Offence { + offender: 11, + reporters: vec![1], + slash_fraction: Perbill::from_percent(20), + }], + ); + Session::roll_next(); + + // F1 * (reward_proportion * slash - 0) + // 50% * (10% * initial_balance * 20%) + let reward = (initial_balance / 5) / 20; + assert_eq!(reward, 10); + assert_eq!(asset::total_balance::(&1), initial_balance_1 + reward); + + ::on_new_offences( + session_mock::Session::current_index(), + vec![rc_client::Offence { + offender: 11, + reporters: vec![1], + slash_fraction: Perbill::from_percent(50), + }], + ); + Session::roll_next(); + + let prior_payout = reward; + // F1 * (reward_proportion * slash - prior_payout) + // 50% * (10% * (initial_balance / 2) - prior_payout) + let reward = ((initial_balance / 20) - prior_payout) / 2; + assert_eq!(reward, 20); + assert_eq!(asset::total_balance::(&1), initial_balance_1 + prior_payout + reward); + }); +} + +#[test] +fn deferred_slashes_are_deferred() { + ExtBuilder::default().slash_defer_duration(2).build_and_execute(|| { + assert_eq!(asset::stakeable_balance::(&11), 1000); + assert_eq!(asset::stakeable_balance::(&101), 500); + + let exposure = Staking::eras_stakers(active_era(), &11); + let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value; + + // only 1 page of exposure, so slashes will be applied in one block. 
+ assert_eq!(Eras::::exposure_page_count(1, &11), 1); + + add_slash(11); + assert_eq!( + staking_events_since_last_call(), + vec![Event::OffenceReported { + offence_era: 1, + validator: 11, + fraction: Perbill::from_percent(10) + }] + ); + + // slash computed in the next block + Session::roll_next(); + assert_eq!( + staking_events_since_last_call(), + vec![Event::SlashComputed { offence_era: 1, slash_era: 3, offender: 11, page: 0 },] + ); + + // nominations are not removed regardless of the deferring. + assert_eq!(Nominators::::get(101).unwrap().targets, vec![11, 21]); + + assert_eq!(asset::stakeable_balance::(&11), 1000); + assert_eq!(asset::stakeable_balance::(&101), 500); + + Session::roll_until_active_era(2); + // no slash applied + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::SessionRotated { starting_session: 4, active_era: 1, planned_era: 2 }, + Event::PagedElectionProceeded { page: 0, result: Ok(2) }, + Event::SessionRotated { starting_session: 5, active_era: 1, planned_era: 2 }, + Event::EraPaid { era_index: 1, validator_payout: 7500, remainder: 7500 }, + Event::SessionRotated { starting_session: 6, active_era: 2, planned_era: 2 } + ] + ); + + assert_eq!(asset::stakeable_balance::(&11), 1000); + assert_eq!(asset::stakeable_balance::(&101), 500); + + // the slashes for era 1 will start applying in era 3, to end before era 4. + Session::roll_until_active_era(3); + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::SessionRotated { starting_session: 7, active_era: 2, planned_era: 3 }, + Event::PagedElectionProceeded { page: 0, result: Ok(2) }, + Event::SessionRotated { starting_session: 8, active_era: 2, planned_era: 3 }, + Event::EraPaid { era_index: 2, validator_payout: 7500, remainder: 7500 }, + Event::SessionRotated { starting_session: 9, active_era: 3, planned_era: 3 } + ] + ); + + // Slashes not applied yet. Will apply in the next block after era starts. 
+ assert_eq!(asset::stakeable_balance::(&11), 1000); + assert_eq!(asset::stakeable_balance::(&101), 500); + + // trigger slashing by advancing block. + Session::roll_next(); + + assert_eq!(asset::stakeable_balance::(&11), 900); + assert_eq!(asset::stakeable_balance::(&101), 500 - (nominated_value / 10)); + + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::Slashed { staker: 11, amount: 100 }, + Event::Slashed { staker: 101, amount: 25 } + ] + ); + }) +} + +#[test] +fn retroactive_deferred_slashes_two_eras_before() { + ExtBuilder::default().slash_defer_duration(2).build_and_execute(|| { + assert_eq!(BondingDuration::get(), 3); + assert_eq!(Nominators::::get(101).unwrap().targets, vec![11, 21]); + + Session::roll_until_active_era(2); + let _ = staking_events_since_last_call(); + + // slash for era 1 detected in era 2, defer for 2, apply in era 3. + add_slash_in_era(11, 1); + assert_eq!( + staking_events_since_last_call(), + vec![Event::OffenceReported { + offence_era: 1, + validator: 11, + fraction: Perbill::from_percent(10) + }] + ); + + // computed in next block, but not applied + Session::roll_next(); + assert_eq!( + staking_events_since_last_call(), + vec![Event::SlashComputed { offence_era: 1, slash_era: 3, offender: 11, page: 0 }] + ); + + Session::roll_until_active_era(3); + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::SessionRotated { starting_session: 7, active_era: 2, planned_era: 3 }, + Event::PagedElectionProceeded { page: 0, result: Ok(2) }, + Event::SessionRotated { starting_session: 8, active_era: 2, planned_era: 3 }, + Event::EraPaid { era_index: 2, validator_payout: 7500, remainder: 7500 }, + Event::SessionRotated { starting_session: 9, active_era: 3, planned_era: 3 } + ] + ); + + // Slashes not applied yet. Will apply in the next block after era starts. 
+ Session::roll_next(); + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::Slashed { staker: 11, amount: 100 }, + Event::Slashed { staker: 101, amount: 25 } + ] + ); + }) +} + +#[test] +fn retroactive_deferred_slashes_one_before() { + ExtBuilder::default() + .slash_defer_duration(2) + .nominate(false) + .build_and_execute(|| { + assert_eq!(BondingDuration::get(), 3); + + // unbond at slash era. + Session::roll_until_active_era(2); + + assert_ok!(Staking::chill(RuntimeOrigin::signed(11))); + assert_ok!(Staking::unbond(RuntimeOrigin::signed(11), 100)); + + Session::roll_until_active_era(3); + // ignore all events thus far + let _ = staking_events_since_last_call(); + + add_slash_in_era(11, 2); + assert_eq!( + staking_events_since_last_call(), + vec![Event::OffenceReported { + offence_era: 2, + validator: 11, + fraction: Perbill::from_percent(10) + }] + ); + + // computed in next block, but not applied + Session::roll_next(); + assert_eq!( + staking_events_since_last_call(), + vec![Event::SlashComputed { offence_era: 2, slash_era: 4, offender: 11, page: 0 }] + ); + + Session::roll_until_active_era(4); + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::SessionRotated { starting_session: 10, active_era: 3, planned_era: 4 }, + Event::PagedElectionProceeded { page: 0, result: Ok(2) }, + Event::SessionRotated { starting_session: 11, active_era: 3, planned_era: 4 }, + Event::EraPaid { era_index: 3, validator_payout: 7500, remainder: 7500 }, + Event::SessionRotated { starting_session: 12, active_era: 4, planned_era: 4 } + ] + ); + + // no slash applied yet + assert_eq!(Staking::ledger(11.into()).unwrap().total, 1000); + + // slash happens at next blocks. + Session::roll_next(); + assert_eq!( + staking_events_since_last_call(), + vec![Event::Slashed { staker: 11, amount: 100 }] + ); + + // their ledger has already been slashed. 
+ assert_eq!(Staking::ledger(11.into()).unwrap().total, 900); + assert_ok!(Staking::unbond(RuntimeOrigin::signed(11), 1000)); + assert_eq!(Staking::ledger(11.into()).unwrap().total, 900); + }) +} + +#[test] +fn invulnerables_are_not_slashed() { + // For invulnerable validators no slashing is performed. + ExtBuilder::default() + .invulnerables(vec![11]) + .nominate(false) + .build_and_execute(|| { + assert_eq!(asset::stakeable_balance::(&11), 1000); + assert_eq!(asset::stakeable_balance::(&21), 1000); + + let initial_balance = Staking::slashable_balance_of(&21); + + // slash both + add_slash(11); + add_slash(21); + Session::roll_next(); + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::OffenceReported { + offence_era: 1, + validator: 21, + fraction: Perbill::from_percent(10) + }, + Event::SlashComputed { offence_era: 1, slash_era: 1, offender: 21, page: 0 }, + Event::Slashed { staker: 21, amount: 100 } + ] + ); + + // The validator 11 hasn't been slashed, but 21 has been. + assert_eq!(asset::stakeable_balance::(&11), 1000); + // 1000 - (0.1 * initial_balance) + assert_eq!(asset::stakeable_balance::(&21), 1000 - (initial_balance / 10)); + }); +} + +#[test] +fn dont_slash_if_fraction_is_zero() { + // Don't slash if the fraction is zero. + ExtBuilder::default().nominate(false).build_and_execute(|| { + assert_eq!(asset::stakeable_balance::(&11), 1000); + + add_slash_with_percent(11, 0); + Session::roll_next(); + assert_eq!( + staking_events_since_last_call(), + vec![Event::OffenceReported { offence_era: 1, validator: 11, fraction: Zero::zero() }] + ); + + // The validator hasn't been slashed. The new era is not forced. + assert_eq!(asset::stakeable_balance::(&11), 1000); + assert_eq!(ForceEra::::get(), Forcing::NotForcing); + }); +} + +#[test] +fn only_slash_for_max_in_era() { + // multiple slashes within one era are only applied if it is more than any previous slash in the + // same era. 
+ ExtBuilder::default().nominate(false).build_and_execute(|| { + assert_eq!(asset::stakeable_balance::(&11), 1000); + + add_slash_with_percent(11, 50); + Session::roll_next(); + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::OffenceReported { + offence_era: 1, + validator: 11, + fraction: Perbill::from_percent(50) + }, + Event::SlashComputed { offence_era: 1, slash_era: 1, offender: 11, page: 0 }, + Event::Slashed { staker: 11, amount: 500 } + ] + ); + + // The validator has been slashed and has been force-chilled. + assert_eq!(asset::stakeable_balance::(&11), 500); + + add_slash_with_percent(11, 25); + Session::roll_next(); + assert_eq!( + staking_events_since_last_call(), + vec![Event::OffenceReported { + offence_era: 1, + validator: 11, + fraction: Perbill::from_percent(25) + },] + ); + + // The validator has not been slashed additionally. + assert_eq!(asset::stakeable_balance::(&11), 500); + + // now slash for more than 50 + add_slash_with_percent(11, 60); + Session::roll_next(); + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::OffenceReported { + offence_era: 1, + validator: 11, + fraction: Perbill::from_percent(60) + }, + Event::SlashComputed { offence_era: 1, slash_era: 1, offender: 11, page: 0 }, + Event::Slashed { staker: 11, amount: 100 } + ] + ); + + // The validator got slashed 10% more. + assert_eq!(asset::stakeable_balance::(&11), 400); + }) +} + +#[test] +fn garbage_collection_after_slashing() { + // ensures that `SlashingSpans` and `SpanSlash` of an account is removed after reaping. 
+ ExtBuilder::default() + .existential_deposit(2) + .balance_factor(2) + .build_and_execute(|| { + assert_eq!(asset::stakeable_balance::(&11), 2000); + + add_slash_with_percent(11, 10); + Session::roll_next(); + + assert_eq!(asset::stakeable_balance::(&11), 2000 - 200); + assert!(SlashingSpans::::get(&11).is_some()); + assert_eq!(SpanSlash::::get(&(11, 0)).amount(), &200); + + add_slash_with_percent(11, 100); + Session::roll_next(); + + // validator and nominator slash in era are garbage-collected by era change, + // so we don't test those here. + + assert_eq!(asset::stakeable_balance::(&11), 0); + // Non staked balance is not touched. + assert_eq!(asset::total_balance::(&11), ExistentialDeposit::get()); + + let slashing_spans = SlashingSpans::::get(&11).unwrap(); + assert_eq!(slashing_spans.iter().count(), 2); + + // reap_stash respects num_slashing_spans so that weight is accurate + assert_noop!( + Staking::reap_stash(RuntimeOrigin::signed(20), 11, 0), + Error::::IncorrectSlashingSpans + ); + assert_ok!(Staking::reap_stash(RuntimeOrigin::signed(20), 11, 2)); + + assert!(SlashingSpans::::get(&11).is_none()); + assert_eq!(SpanSlash::::get(&(11, 0)).amount(), &0); + }) +} + +#[test] +fn garbage_collection_on_window_pruning() { + // ensures that `ValidatorSlashInEra` and `NominatorSlashInEra` are cleared after + // `BondingDuration`. 
+ ExtBuilder::default().build_and_execute(|| { + assert_eq!(asset::stakeable_balance::(&11), 1000); + let now = active_era(); + + let exposure = Staking::eras_stakers(now, &11); + assert_eq!(asset::stakeable_balance::(&101), 500); + let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value; + + add_slash(11); + Session::roll_next(); + + assert_eq!(asset::stakeable_balance::(&11), 900); + assert_eq!(asset::stakeable_balance::(&101), 500 - (nominated_value / 10)); + + assert!(ValidatorSlashInEra::::get(&now, &11).is_some()); + assert!(NominatorSlashInEra::::get(&now, &101).is_some()); + + // + 1 because we have to exit the bonding window. + for era in (0..(BondingDuration::get() + 1)).map(|offset| offset + now + 1) { + assert!(ValidatorSlashInEra::::get(&now, &11).is_some()); + assert!(NominatorSlashInEra::::get(&now, &101).is_some()); + + Session::roll_until_active_era(era); + } + + assert!(ValidatorSlashInEra::::get(&now, &11).is_none()); + assert!(NominatorSlashInEra::::get(&now, &101).is_none()); + }) +} + +#[test] +fn slashing_nominators_by_span_max() { + ExtBuilder::default().build_and_execute(|| { + Session::roll_until_active_era(3); + + assert_eq!(asset::stakeable_balance::(&11), 1000); + assert_eq!(asset::stakeable_balance::(&21), 1000); + assert_eq!(asset::stakeable_balance::(&101), 500); + assert_eq!(Staking::slashable_balance_of(&21), 1000); + + let exposure_11 = Staking::eras_stakers(active_era(), &11); + let exposure_21 = Staking::eras_stakers(active_era(), &21); + let nominated_value_11 = exposure_11.others.iter().find(|o| o.who == 101).unwrap().value; + let nominated_value_21 = exposure_21.others.iter().find(|o| o.who == 101).unwrap().value; + + add_slash_in_era(11, 2); + Session::roll_next(); + + assert_eq!(asset::stakeable_balance::(&11), 900); + + let slash_1_amount = Perbill::from_percent(10) * nominated_value_11; + assert_eq!(asset::stakeable_balance::(&101), 500 - slash_1_amount); + + let expected_spans = vec![ + 
slashing::SlashingSpan { index: 1, start: 4, length: None }, + slashing::SlashingSpan { index: 0, start: 0, length: Some(4) }, + ]; + + let get_span = |account| SlashingSpans::::get(&account).unwrap(); + + assert_eq!(get_span(11).iter().collect::>(), expected_spans); + assert_eq!(get_span(101).iter().collect::>(), expected_spans); + + // second slash: higher era, higher value, same span. + add_slash_in_era_with_value(21, 3, Perbill::from_percent(30)); + Session::roll_next(); + + // 11 was not further slashed, but 21 and 101 were. + assert_eq!(asset::stakeable_balance::(&11), 900); + assert_eq!(asset::stakeable_balance::(&21), 700); + + let slash_2_amount = Perbill::from_percent(30) * nominated_value_21; + assert!(slash_2_amount > slash_1_amount); + + // only the maximum slash in a single span is taken. + assert_eq!(asset::stakeable_balance::(&101), 500 - slash_2_amount); + + // third slash: in same era and on same validator as first, higher in-era value, but lower + // slash value than slash 2. + add_slash_in_era_with_value(11, 2, Perbill::from_percent(20)); + Session::roll_next(); + + // 11 was further slashed, but 21 and 101 were not. + assert_eq!(asset::stakeable_balance::(&11), 800); + assert_eq!(asset::stakeable_balance::(&21), 700); + + let slash_3_amount = Perbill::from_percent(20) * nominated_value_21; + assert!(slash_3_amount < slash_2_amount); + assert!(slash_3_amount > slash_1_amount); + + // only the maximum slash in a single span is taken. 
+ assert_eq!(asset::stakeable_balance::(&101), 500 - slash_2_amount); + }); +} + +#[test] +fn slashes_are_summed_across_spans() { + ExtBuilder::default().nominate(false).build_and_execute(|| { + Session::roll_until_active_era(3); + + assert_eq!(asset::stakeable_balance::(&21), 1000); + assert_eq!(Staking::slashable_balance_of(&21), 1000); + + let get_span = |account| SlashingSpans::::get(&account).unwrap(); + + add_slash(21); + Session::roll_next(); + + let expected_spans = vec![ + slashing::SlashingSpan { index: 1, start: 4, length: None }, + slashing::SlashingSpan { index: 0, start: 0, length: Some(4) }, + ]; + + assert_eq!(get_span(21).iter().collect::>(), expected_spans); + assert_eq!(asset::stakeable_balance::(&21), 900); + assert_eq!(Staking::slashable_balance_of(&21), 900); + + Session::roll_until_active_era(4); + add_slash(21); + Session::roll_next(); + + let expected_spans = vec![ + slashing::SlashingSpan { index: 2, start: 5, length: None }, + slashing::SlashingSpan { index: 1, start: 4, length: Some(1) }, + slashing::SlashingSpan { index: 0, start: 0, length: Some(4) }, + ]; + + assert_eq!(get_span(21).iter().collect::>(), expected_spans); + assert_eq!(asset::stakeable_balance::(&21), 810); + }); +} + +#[test] +fn staker_cannot_bail_deferred_slash() { + // as long as SlashDeferDuration is less than BondingDuration, this should not be possible. 
+ ExtBuilder::default() + .slash_defer_duration(2) + .bonding_duration(3) + .build_and_execute(|| { + assert_eq!(asset::stakeable_balance::(&11), 1000); + assert_eq!(asset::stakeable_balance::(&101), 500); + + add_slash(11); + Session::roll_next(); + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::OffenceReported { + offence_era: 1, + validator: 11, + fraction: Perbill::from_percent(10) + }, + Event::SlashComputed { offence_era: 1, slash_era: 3, offender: 11, page: 0 } + ] + ); + + // now we chill + assert_ok!(Staking::chill(RuntimeOrigin::signed(101))); + assert_ok!(Staking::unbond(RuntimeOrigin::signed(101), 500)); + + assert_eq!(CurrentEra::::get().unwrap(), 1); + assert_eq!(active_era(), 1); + + assert_eq!( + Ledger::::get(101).unwrap(), + StakingLedgerInspect { + active: 0, + total: 500, + stash: 101, + unlocking: bounded_vec![UnlockChunk { era: 4u32, value: 500 }], + } + ); + + // no slash yet. + assert_eq!(asset::stakeable_balance::(&11), 1000); + assert_eq!(asset::stakeable_balance::(&101), 500); + + // no slash yet. + Session::roll_until_active_era(2); + assert_eq!(asset::stakeable_balance::(&11), 1000); + assert_eq!(asset::stakeable_balance::(&101), 500); + + // no slash yet. + Session::roll_until_active_era(3); + let _ = staking_events_since_last_call(); + assert_eq!(asset::stakeable_balance::(&11), 1000); + assert_eq!(asset::stakeable_balance::(&101), 500); + assert_eq!(CurrentEra::::get().unwrap(), 3); + assert_eq!(active_era(), 3); + + // and cannot yet unbond: + assert_storage_noop!(assert!(Staking::withdraw_unbonded( + RuntimeOrigin::signed(101), + 0 + ) + .is_ok())); + + // first block of era 3, slashes are applied. 
+ Session::roll_next(); + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::Slashed { staker: 11, amount: 100 }, + Event::Slashed { staker: 101, amount: 25 } + ] + ); + + assert_eq!(asset::stakeable_balance::(&11), 900); + assert_eq!(asset::stakeable_balance::(&101), 500 - 25); + + // and the leftover of the funds can now be unbonded. + }) +} + +#[test] +fn remove_deferred() { + ExtBuilder::default().slash_defer_duration(2).build_and_execute(|| { + assert_eq!(asset::stakeable_balance::(&11), 1000); + assert_eq!(asset::stakeable_balance::(&101), 500); + + // deferred to start of era 3. + add_slash(11); + Session::roll_next(); + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::OffenceReported { + offence_era: 1, + validator: 11, + fraction: Perbill::from_percent(10) + }, + Event::SlashComputed { offence_era: 1, slash_era: 3, offender: 11, page: 0 } + ] + ); + + assert_eq!(asset::stakeable_balance::(&11), 1000); + assert_eq!(asset::stakeable_balance::(&101), 500); + + Session::roll_until_active_era(2); + let _ = staking_events_since_last_call(); + // reported later, but deferred to start of era 3 as well. 
+ add_slash_in_era_with_value(11, 1, Perbill::from_percent(15)); + Session::roll_next(); + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::OffenceReported { + offence_era: 1, + validator: 11, + fraction: Perbill::from_percent(15) + }, + Event::SlashComputed { offence_era: 1, slash_era: 3, offender: 11, page: 0 } + ] + ); + + assert_eq!( + UnappliedSlashes::::iter_prefix(&3).collect::>(), + vec![ + ( + (11, Perbill::from_percent(10), 0), + UnappliedSlash { + validator: 11, + own: 100, + others: bounded_vec![(101, 25)], + reporter: None, + payout: 6 + } + ), + ( + (11, Perbill::from_percent(15), 0), + UnappliedSlash { + validator: 11, + own: 50, + others: bounded_vec![(101, 12)], + reporter: None, + payout: 6 + } + ), + ] + ); + + // fails if empty + assert_noop!( + Staking::cancel_deferred_slash(RuntimeOrigin::root(), 1, vec![]), + Error::::EmptyTargets + ); + + // cancel the slash with 10%. + assert_ok!(Staking::cancel_deferred_slash( + RuntimeOrigin::root(), + 3, + vec![(11, Perbill::from_percent(10), 0)] + )); + assert_eq!(UnappliedSlashes::::iter_prefix(&3).count(), 1); + assert_eq!( + staking_events_since_last_call(), + vec![Event::SlashCancelled { + slash_era: 3, + slash_key: (11, Perbill::from_percent(10), 0), + payout: 6 + }] + ); + + // apply the one with 15%. 
+ Session::roll_until_active_era(3); + let _ = staking_events_since_last_call(); + Session::roll_next(); + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::Slashed { staker: 11, amount: 50 }, + Event::Slashed { staker: 101, amount: 12 } + ] + ); + }) +} + +#[test] +fn remove_multi_deferred() { + ExtBuilder::default() + .slash_defer_duration(2) + .validator_count(4) + .set_status(41, StakerStatus::Validator) + .set_status(51, StakerStatus::Validator) + .build_and_execute(|| { + assert_eq!(asset::stakeable_balance::(&11), 1000); + assert_eq!(asset::stakeable_balance::(&101), 500); + + add_slash_with_percent(11, 10); + add_slash_with_percent(21, 10); + add_slash_with_percent(41, 25); + Session::roll_next(); + Session::roll_next(); + Session::roll_next(); + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::OffenceReported { + offence_era: 1, + validator: 11, + fraction: Perbill::from_percent(10) + }, + Event::OffenceReported { + offence_era: 1, + validator: 21, + fraction: Perbill::from_percent(10) + }, + Event::OffenceReported { + offence_era: 1, + validator: 41, + fraction: Perbill::from_percent(25) + }, + Event::SlashComputed { offence_era: 1, slash_era: 3, offender: 41, page: 0 }, + Event::SlashComputed { offence_era: 1, slash_era: 3, offender: 21, page: 0 }, + Event::SlashComputed { offence_era: 1, slash_era: 3, offender: 11, page: 0 }, + ] + ); + + // there are 3 slashes to be applied in era 3. + assert_eq!(UnappliedSlashes::::iter_prefix(&3).count(), 3); + + // lets cancel 2 of them. 
+ assert_ok!(Staking::cancel_deferred_slash( + RuntimeOrigin::root(), + 3, + vec![(11, Perbill::from_percent(10), 0), (21, Perbill::from_percent(10), 0),] + )); + + let slashes = UnappliedSlashes::::iter_prefix(&3).collect::>(); + assert_eq!(slashes.len(), 1); + }) +} + +#[test] +fn proportional_slash_stop_slashing_if_remaining_zero() { + ExtBuilder::default().nominate(true).build_and_execute(|| { + let c = |era, value| UnlockChunk:: { era, value }; + + // we have some chunks, but they are not affected. + let unlocking = bounded_vec![c(1, 10), c(2, 10)]; + + // Given + let mut ledger = StakingLedger::::new(123, 20); + ledger.total = 40; + ledger.unlocking = unlocking; + + assert_eq!(BondingDuration::get(), 3); + + // should not slash more than the amount requested, by accidentally slashing the first + // chunk. + assert_eq!(ledger.slash(18, 1, 0), 18); + }); +} + +#[test] +fn proportional_ledger_slash_works() { + ExtBuilder::default().nominate(true).build_and_execute(|| { + let c = |era, value| UnlockChunk:: { era, value }; + // Given + let mut ledger = StakingLedger::::new(123, 10); + assert_eq!(BondingDuration::get(), 3); + + // When we slash a ledger with no unlocking chunks + assert_eq!(ledger.slash(5, 1, 0), 5); + // Then + assert_eq!(ledger.total, 5); + assert_eq!(ledger.active, 5); + assert_eq!(LedgerSlashPerEra::get().0, 5); + assert_eq!(LedgerSlashPerEra::get().1, Default::default()); + + // When we slash a ledger with no unlocking chunks and the slash amount is greater then the + // total + assert_eq!(ledger.slash(11, 1, 0), 5); + // Then + assert_eq!(ledger.total, 0); + assert_eq!(ledger.active, 0); + assert_eq!(LedgerSlashPerEra::get().0, 0); + assert_eq!(LedgerSlashPerEra::get().1, Default::default()); + + // Given + ledger.unlocking = bounded_vec![c(4, 10), c(5, 10)]; + ledger.total = 2 * 10; + ledger.active = 0; + // When all the chunks overlap with the slash eras + assert_eq!(ledger.slash(20, 0, 0), 20); + // Then + assert_eq!(ledger.unlocking, 
vec![]); + assert_eq!(ledger.total, 0); + assert_eq!(LedgerSlashPerEra::get().0, 0); + assert_eq!(LedgerSlashPerEra::get().1, BTreeMap::from([(4, 0), (5, 0)])); + + // Given + ledger.unlocking = bounded_vec![c(4, 100), c(5, 100), c(6, 100), c(7, 100)]; + ledger.total = 4 * 100; + ledger.active = 0; + // When the first 2 chunks don't overlap with the affected range of unlock eras. + assert_eq!(ledger.slash(140, 0, 3), 140); + // Then + assert_eq!(ledger.unlocking, vec![c(4, 100), c(5, 100), c(6, 30), c(7, 30)]); + assert_eq!(ledger.total, 4 * 100 - 140); + assert_eq!(LedgerSlashPerEra::get().0, 0); + assert_eq!(LedgerSlashPerEra::get().1, BTreeMap::from([(6, 30), (7, 30)])); + + // Given + ledger.unlocking = bounded_vec![c(4, 100), c(5, 100), c(6, 100), c(7, 100)]; + ledger.total = 4 * 100; + ledger.active = 0; + // When the first 2 chunks don't overlap with the affected range of unlock eras. + assert_eq!(ledger.slash(15, 0, 3), 15); + // Then + assert_eq!(ledger.unlocking, vec![c(4, 100), c(5, 100), c(6, 100 - 8), c(7, 100 - 7)]); + assert_eq!(ledger.total, 4 * 100 - 15); + assert_eq!(LedgerSlashPerEra::get().0, 0); + assert_eq!(LedgerSlashPerEra::get().1, BTreeMap::from([(6, 92), (7, 93)])); + + // Given + ledger.unlocking = bounded_vec![c(4, 40), c(5, 100), c(6, 10), c(7, 250)]; + ledger.active = 500; + // 900 + ledger.total = 40 + 10 + 100 + 250 + 500; + // When we have a partial slash that touches all chunks + assert_eq!(ledger.slash(900 / 2, 0, 0), 450); + // Then + assert_eq!(ledger.active, 500 / 2); + assert_eq!( + ledger.unlocking, + vec![c(4, 40 / 2), c(5, 100 / 2), c(6, 10 / 2), c(7, 250 / 2)] + ); + assert_eq!(ledger.total, 900 / 2); + assert_eq!(LedgerSlashPerEra::get().0, 500 / 2); + assert_eq!( + LedgerSlashPerEra::get().1, + BTreeMap::from([(4, 40 / 2), (5, 100 / 2), (6, 10 / 2), (7, 250 / 2)]) + ); + + // slash 1/4th with not chunk. 
+ ledger.unlocking = bounded_vec![]; + ledger.active = 500; + ledger.total = 500; + // When we have a partial slash that touches all chunks + assert_eq!(ledger.slash(500 / 4, 0, 0), 500 / 4); + // Then + assert_eq!(ledger.active, 3 * 500 / 4); + assert_eq!(ledger.unlocking, vec![]); + assert_eq!(ledger.total, ledger.active); + assert_eq!(LedgerSlashPerEra::get().0, 3 * 500 / 4); + assert_eq!(LedgerSlashPerEra::get().1, Default::default()); + + // Given we have the same as above, + ledger.unlocking = bounded_vec![c(4, 40), c(5, 100), c(6, 10), c(7, 250)]; + ledger.active = 500; + ledger.total = 40 + 10 + 100 + 250 + 500; // 900 + assert_eq!(ledger.total, 900); + // When we have a higher min balance + assert_eq!( + ledger.slash( + 900 / 2, + 25, /* min balance - chunks with era 0 & 2 will be slashed to <=25, causing it + * to get swept */ + 0 + ), + 450 + ); + assert_eq!(ledger.active, 500 / 2); + // the last chunk was not slashed 50% like all the rest, because some other earlier chunks + // got dusted. + assert_eq!(ledger.unlocking, vec![c(5, 100 / 2), c(7, 150)]); + assert_eq!(ledger.total, 900 / 2); + assert_eq!(LedgerSlashPerEra::get().0, 500 / 2); + assert_eq!( + LedgerSlashPerEra::get().1, + BTreeMap::from([(4, 0), (5, 100 / 2), (6, 0), (7, 150)]) + ); + + // Given + // slash order --------------------NA--------2----------0----------1---- + ledger.unlocking = bounded_vec![c(4, 40), c(5, 100), c(6, 10), c(7, 250)]; + ledger.active = 500; + ledger.total = 40 + 10 + 100 + 250 + 500; // 900 + assert_eq!( + ledger.slash( + 500 + 10 + 250 + 100 / 2, // active + era 6 + era 7 + era 5 / 2 + 0, + 3 /* slash era 6 first, so the affected parts are era 6, era 7 and + * ledge.active. 
This will cause the affected to go to zero, and then we will + * start slashing older chunks */ + ), + 500 + 250 + 10 + 100 / 2 + ); + // Then + assert_eq!(ledger.active, 0); + assert_eq!(ledger.unlocking, vec![c(4, 40), c(5, 100 / 2)]); + assert_eq!(ledger.total, 90); + assert_eq!(LedgerSlashPerEra::get().0, 0); + assert_eq!(LedgerSlashPerEra::get().1, BTreeMap::from([(5, 100 / 2), (6, 0), (7, 0)])); + + // Given + // iteration order------------------NA---------2----------0----------1---- + ledger.unlocking = bounded_vec![c(4, 100), c(5, 100), c(6, 100), c(7, 100)]; + ledger.active = 100; + ledger.total = 5 * 100; + // When + assert_eq!( + ledger.slash( + 351, // active + era 6 + era 7 + era 5 / 2 + 1 + 50, // min balance - everything slashed below 50 will get dusted + 3 /* slash era 3+3 first, so the affected parts are era 6, era 7 and + * ledge.active. This will cause the affected to go to zero, and then we + * will start slashing older chunks */ + ), + 400 + ); + // Then + assert_eq!(ledger.active, 0); + assert_eq!(ledger.unlocking, vec![c(4, 100)]); + assert_eq!(ledger.total, 100); + assert_eq!(LedgerSlashPerEra::get().0, 0); + assert_eq!(LedgerSlashPerEra::get().1, BTreeMap::from([(5, 0), (6, 0), (7, 0)])); + + // Tests for saturating arithmetic + + // Given + let slash = u64::MAX as Balance * 2; + // The value of the other parts of ledger that will get slashed + let value = slash - (10 * 4); + + ledger.active = 10; + ledger.unlocking = bounded_vec![c(4, 10), c(5, 10), c(6, 10), c(7, value)]; + ledger.total = value + 40; + // When + let slash_amount = ledger.slash(slash, 0, 0); + assert_eq_error_rate!(slash_amount, slash, 5); + // Then + assert_eq!(ledger.active, 0); // slash of 9 + assert_eq!(ledger.unlocking, vec![]); + assert_eq!(ledger.total, 0); + assert_eq!(LedgerSlashPerEra::get().0, 0); + assert_eq!(LedgerSlashPerEra::get().1, BTreeMap::from([(4, 0), (5, 0), (6, 0), (7, 0)])); + + // Given + use sp_runtime::PerThing as _; + let slash = u64::MAX as 
Balance * 2; + let value = u64::MAX as Balance * 2; + let unit = 100; + // slash * value that will saturate + assert!(slash.checked_mul(value).is_none()); + // but slash * unit won't. + assert!(slash.checked_mul(unit).is_some()); + ledger.unlocking = bounded_vec![c(4, unit), c(5, value), c(6, unit), c(7, unit)]; + //--------------------------------------note value^^^ + ledger.active = unit; + ledger.total = unit * 4 + value; + // When + assert_eq!(ledger.slash(slash, 0, 0), slash); + // Then + // The amount slashed out of `unit` + let affected_balance = value + unit * 4; + let ratio = Perquintill::from_rational_with_rounding(slash, affected_balance, Rounding::Up) + .unwrap(); + // `unit` after the slash is applied + let unit_slashed = { + let unit_slash = ratio.mul_ceil(unit); + unit - unit_slash + }; + let value_slashed = { + let value_slash = ratio.mul_ceil(value); + value - value_slash + }; + assert_eq!(ledger.active, unit_slashed); + assert_eq!(ledger.unlocking, vec![c(5, value_slashed), c(7, 32)]); + assert_eq!(ledger.total, value_slashed + 32); + assert_eq!(LedgerSlashPerEra::get().0, 0); + assert_eq!( + LedgerSlashPerEra::get().1, + BTreeMap::from([(4, 0), (5, value_slashed), (6, 0), (7, 32)]) + ); + }); +} + +mod paged_slashing { + use super::*; + use crate::slashing::OffenceRecord; + + #[test] + fn offence_processed_in_multi_block() { + // Ensure each page is processed only once. + ExtBuilder::default() + .has_stakers(false) + .slash_defer_duration(3) + .build_and_execute(|| { + let base_stake = 1000; + + // Create a validator: + bond_validator(11, base_stake); + assert_eq!(Validators::::count(), 1); + + // Track the total exposure of 11. + let mut exposure_counter = base_stake; + + // Exposure page size is 64, hence it creates 4 pages of exposure. 
+ let expected_page_count = 4; + for i in 0..200 { + let bond_amount = base_stake + i as Balance; + bond_nominator(1000 + i, bond_amount, vec![11]); + // with multi page reward payout, payout exposure is same as total exposure. + exposure_counter += bond_amount; + } + + Session::roll_until_active_era(2); + let _ = staking_events_since_last_call(); + + assert_eq!( + ErasStakersOverview::::get(2, 11).expect("exposure should exist"), + PagedExposureMetadata { + total: exposure_counter, + own: base_stake, + page_count: expected_page_count, + nominator_count: 200, + } + ); + + // report an offence for 11 in era 2. + add_slash(11); + + // ensure offence is queued. + assert_eq!( + staking_events_since_last_call(), + vec![Event::OffenceReported { + validator: 11, + fraction: Perbill::from_percent(10), + offence_era: 2 + }] + ); + + // ensure offence queue has items. + assert_eq!( + OffenceQueue::::get(2, 11).unwrap(), + slashing::OffenceRecord { + reporter: None, + reported_era: 2, + // first page to be marked for processing. + exposure_page: expected_page_count - 1, + slash_fraction: Perbill::from_percent(10), + prior_slash_fraction: Perbill::zero(), + } + ); + + // The offence era is noted in the queue. + assert_eq!(OffenceQueueEras::::get().unwrap(), vec![2]); + + // ensure Processing offence is empty yet. + assert_eq!(ProcessingOffence::::get(), None); + + // ensure no unapplied slashes for era 5 (offence_era + slash_defer_duration). + assert_eq!(UnappliedSlashes::::iter_prefix(&5).collect::>().len(), 0); + + // Checkpoint 1: advancing to next block will compute the first page of slash. + Session::roll_next(); + + // ensure the last page of offence is processed. 
+ // (offence is processed in reverse order of pages) + assert_eq!( + staking_events_since_last_call().as_slice(), + vec![Event::SlashComputed { + offence_era: 2, + slash_era: 5, + offender: 11, + page: expected_page_count - 1 + },] + ); + + // offender is removed from offence queue + assert_eq!(OffenceQueue::::get(2, 11), None); + + // offence era is removed from queue. + assert_eq!(OffenceQueueEras::::get(), None); + + // this offence is not completely processed yet, so it should be in processing. + assert_eq!( + ProcessingOffence::::get(), + Some(( + 2, + 11, + OffenceRecord { + reporter: None, + reported_era: 2, + // page 3 is processed, next page to be processed is 2. + exposure_page: 2, + slash_fraction: Perbill::from_percent(10), + prior_slash_fraction: Perbill::zero(), + } + )) + ); + + // unapplied slashes for era 5. + let slashes = UnappliedSlashes::::iter_prefix(&5).collect::>(); + + // only one unapplied slash exists. + assert_eq!(slashes.len(), 1); + let (slash_key, unapplied_slash) = &slashes[0]; + + // this is a unique key to ensure unapplied slash is not overwritten for multiple + // offence by offender in the same era. + assert_eq!(*slash_key, (11, Perbill::from_percent(10), expected_page_count - 1)); + + // validator own stake is only included in the first page. Since this is page 3, + // only nominators are slashed. + assert_eq!(unapplied_slash.own, 0); + assert_eq!(unapplied_slash.validator, 11); + assert_eq!(unapplied_slash.others.len(), 200 % 64); + + // Checkpoint 2: advancing to next block will compute the second page of slash. + Session::roll_next(); + + // offence queue still empty + assert_eq!(OffenceQueue::::get(2, 11), None); + assert_eq!(OffenceQueueEras::::get(), None); + + // processing offence points to next page. + assert_eq!( + ProcessingOffence::::get(), + Some(( + 2, + 11, + OffenceRecord { + reporter: None, + reported_era: 2, + // page 2 is processed, next page to be processed is 1. 
+ exposure_page: 1, + slash_fraction: Perbill::from_percent(10), + prior_slash_fraction: Perbill::zero(), + } + )) + ); + + // there are two unapplied slashes for era 4. + assert_eq!(UnappliedSlashes::::iter_prefix(&5).collect::>().len(), 2); + + // ensure the last page of offence is processed. + // (offence is processed in reverse order of pages) + assert_eq!( + staking_events_since_last_call(), + vec![Event::SlashComputed { + offence_era: 2, + slash_era: 5, + offender: 11, + page: expected_page_count - 2 + },] + ); + + // Checkpoint 3: advancing to two more blocks will complete the processing of the + // reported offence + Session::roll_next(); + Session::roll_next(); + + // no processing offence. + assert!(ProcessingOffence::::get().is_none()); + // total of 4 unapplied slash. + assert_eq!(UnappliedSlashes::::iter_prefix(&5).collect::>().len(), 4); + + // Checkpoint 4: lets verify the application of slashes in multiple blocks. + // advance to era 4. + Session::roll_until_active_era(5); + // slashes are not applied just yet. From next blocks, they will be applied. + assert_eq!(UnappliedSlashes::::iter_prefix(&5).collect::>().len(), 4); + + // advance to next block. + Session::roll_next(); + // 1 slash is applied. + assert_eq!(UnappliedSlashes::::iter_prefix(&5).collect::>().len(), 3); + + // advance two blocks. + Session::roll_next(); + Session::roll_next(); + // 2 more slashes are applied. + assert_eq!(UnappliedSlashes::::iter_prefix(&5).collect::>().len(), 1); + + // advance one more block. + Session::roll_next(); + // all slashes are applied. + assert_eq!(UnappliedSlashes::::iter_prefix(&5).collect::>().len(), 0); + + // ensure all stakers are slashed correctly. 
+ assert_eq!(asset::staked::(&11), 1000 - 100); + + for i in 0..200 { + let original_stake = 1000 + i as Balance; + let expected_slash = Perbill::from_percent(10) * original_stake; + assert_eq!(asset::staked::(&(1000 + i)), original_stake - expected_slash); + } + }) + } + + #[test] + fn offence_discarded_correctly() { + ExtBuilder::default().slash_defer_duration(3).build_and_execute(|| { + Session::roll_until_active_era(2); + let _ = staking_events_since_last_call(); + + // Scenario 1: 11 commits an offence in era 2. + add_slash(11); + + // offence is queued, not processed yet. + let queued_offence_one = OffenceQueue::::get(2, 11).unwrap(); + assert_eq!(queued_offence_one.slash_fraction, Perbill::from_percent(10)); + assert_eq!(queued_offence_one.prior_slash_fraction, Perbill::zero()); + assert_eq!(OffenceQueueEras::::get().unwrap(), vec![2]); + + // Scenario 1A: 11 commits a second offence in era 2 with **lower** slash fraction than + // the previous offence. + add_slash_with_percent(11, 5); + + // the second offence is discarded. No change in the queue. + assert_eq!(OffenceQueue::::get(2, 11).unwrap(), queued_offence_one); + + // Scenario 1B: 11 commits a second offence in era 2 with **higher** slash fraction than + // the previous offence. + add_slash_with_percent(11, 15); + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::OffenceReported { + offence_era: 2, + validator: 11, + fraction: Perbill::from_percent(10) + }, + Event::OffenceReported { + offence_era: 2, + validator: 11, + fraction: Perbill::from_percent(5) + }, + Event::OffenceReported { + offence_era: 2, + validator: 11, + fraction: Perbill::from_percent(15) + } + ] + ); + + // the second offence overwrites the first offence. 
+ let overwritten_offence = OffenceQueue::::get(2, 11).unwrap(); + assert!(overwritten_offence.slash_fraction > queued_offence_one.slash_fraction); + assert_eq!(overwritten_offence.slash_fraction, Perbill::from_percent(15)); + assert_eq!(overwritten_offence.prior_slash_fraction, Perbill::zero()); + assert_eq!(OffenceQueueEras::::get().unwrap(), vec![2]); + + // Scenario 2: 11 commits another offence in era 2, but after the previous offence is + // processed. + Session::roll_next(); + assert_eq!( + staking_events_since_last_call(), + vec![Event::SlashComputed { offence_era: 2, slash_era: 5, offender: 11, page: 0 }] + ); + + assert!(OffenceQueue::::get(2, 11).is_none()); + assert!(OffenceQueueEras::::get().is_none()); + // unapplied slash is created for the offence. + assert!(UnappliedSlashes::::contains_key(2 + 3, (11, Perbill::from_percent(15), 0))); + + // Scenario 2A: offence has **lower** slash fraction than the previous offence. + add_slash_with_percent(11, 14); + assert_eq!( + staking_events_since_last_call(), + vec![Event::OffenceReported { + offence_era: 2, + validator: 11, + fraction: Perbill::from_percent(14) + },] + ); + + // offence is discarded. + assert!(OffenceQueue::::get(2, 11).is_none()); + assert!(OffenceQueueEras::::get().is_none()); + + // Scenario 2B: offence has **higher** slash fraction than the previous offence. + add_slash_with_percent(11, 16); + assert_eq!( + staking_events_since_last_call(), + vec![Event::OffenceReported { + offence_era: 2, + validator: 11, + fraction: Perbill::from_percent(16) + },] + ); + + // process offence + Session::roll_next(); + assert_eq!( + staking_events_since_last_call(), + vec![Event::SlashComputed { offence_era: 2, slash_era: 5, offender: 11, page: 0 }] + ); + + // there are now two slash records for 11, for era 5, with the newer one only slashing + // the diff between slash fractions of 16 and 15. 
+ let slash_one = + UnappliedSlashes::::get(2 + 3, (11, Perbill::from_percent(15), 0)).unwrap(); + let slash_two = + UnappliedSlashes::::get(2 + 3, (11, Perbill::from_percent(16), 0)).unwrap(); + assert!(slash_one.own > slash_two.own); + }); + } + + #[test] + fn offence_eras_queued_correctly() { + ExtBuilder::default().build_and_execute(|| { + // 11 and 21 are validators. + assert_eq!(Staking::status(&11).unwrap(), StakerStatus::Validator); + assert_eq!(Staking::status(&21).unwrap(), StakerStatus::Validator); + + Session::roll_until_active_era(2); + + // 11 and 21 commits offence in era 2. + add_slash_in_era(11, 2); + add_slash_in_era(21, 2); + + // 11 and 21 commits offence in era 1 but reported after the era 2 offence. + add_slash_in_era(11, 1); + add_slash_in_era(21, 1); + + // queued offence eras are sorted. + assert_eq!(OffenceQueueEras::::get().unwrap(), vec![1, 2]); + + // next two blocks, the offence in era 1 is processed. + Session::roll_next(); + Session::roll_next(); + + // only era 2 is left in the queue. + assert_eq!(OffenceQueueEras::::get().unwrap(), vec![2]); + + // next block, the offence in era 2 is processed. + Session::roll_next(); + + // era still exist in the queue. + assert_eq!(OffenceQueueEras::::get().unwrap(), vec![2]); + + // next block, the era 2 is processed. + Session::roll_next(); + + // queue is empty. + assert_eq!(OffenceQueueEras::::get(), None); + }); + } + + #[test] + fn non_deferred_slash_applied_instantly() { + ExtBuilder::default().build_and_execute(|| { + Session::roll_until_active_era(2); + + let validator_stake = asset::staked::(&11); + let slash_fraction = Perbill::from_percent(10); + let expected_slash = slash_fraction * validator_stake; + let _ = staking_events_since_last_call(); + + // report an offence for 11 in era 1. + add_slash_in_era_with_value(11, 1, slash_fraction); + + // ensure offence is queued. 
+ assert_eq!( + staking_events_since_last_call().as_slice(), + vec![Event::OffenceReported { + validator: 11, + fraction: Perbill::from_percent(10), + offence_era: 1 + }] + ); + + // process offence + Session::roll_next(); + + // ensure slash is computed and applied. + assert_eq!( + staking_events_since_last_call().as_slice(), + vec![ + Event::SlashComputed { offence_era: 1, slash_era: 1, offender: 11, page: 0 }, + Event::Slashed { staker: 11, amount: expected_slash }, + // this is the nominator of 11. + Event::Slashed { staker: 101, amount: 25 }, + ] + ); + + // ensure validator is slashed. + assert_eq!(asset::staked::(&11), validator_stake - expected_slash); + }); + } + + #[test] + fn validator_with_no_exposure_slashed() { + ExtBuilder::default().build_and_execute(|| { + let validator_stake = asset::staked::(&11); + let slash_fraction = Perbill::from_percent(10); + let expected_slash = slash_fraction * validator_stake; + + // only 101 nominates 11, lets remove them. + assert_ok!(Staking::nominate(RuntimeOrigin::signed(101), vec![21])); + + Session::roll_until_active_era(2); + + // ensure validator has no exposure. + assert_eq!(ErasStakersOverview::::get(2, 11).unwrap().page_count, 0,); + + // clear events + let _ = staking_events_since_last_call(); + + // report an offence for 11. + add_slash_with_percent(11, 10); + Session::roll_next(); + + // ensure validator is slashed. 
+ assert_eq!(asset::staked::(&11), validator_stake - expected_slash); + assert_eq!( + staking_events_since_last_call().as_slice(), + vec![ + Event::OffenceReported { + offence_era: 2, + validator: 11, + fraction: slash_fraction + }, + Event::SlashComputed { offence_era: 2, slash_era: 2, offender: 11, page: 0 }, + Event::Slashed { staker: 11, amount: expected_slash }, + ] + ); + }); + } +} diff --git a/substrate/frame/staking-async/src/weights.rs b/substrate/frame/staking-async/src/weights.rs new file mode 100644 index 0000000000000..d7a9b62293952 --- /dev/null +++ b/substrate/frame/staking-async/src/weights.rs @@ -0,0 +1,1705 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + + +//! Autogenerated weights for `pallet_staking_async` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-04-17, STEPS: `5`, REPEAT: `10`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `ggwpez-ref-hw`, CPU: `AMD EPYC 7232P 8-Core Processor` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: `1024` + +// Executed Command: +// ../../../../../target/release/frame-omni-bencher +// v1 +// benchmark +// pallet +// --pallet +// pallet_staking_async +// --extrinsic +// all +// --runtime +// ../../../../../target/release/wbuild/pallet-staking-async-parachain-runtime/pallet_staking_async_parachain_runtime.compact.wasm +// --steps +// 5 +// --repeat +// 10 +// --genesis-builder-preset +// dot_size +// --template +// ../../../../../substrate/.maintain/frame-weight-template.hbs +// --heap-pages +// 65000 +// --output +// ./pallet_staking_async_dot_size.rs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] +#![allow(dead_code)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `pallet_staking_async`. +pub trait WeightInfo { + fn bond() -> Weight; + fn bond_extra() -> Weight; + fn unbond() -> Weight; + fn withdraw_unbonded_update(s: u32, ) -> Weight; + fn withdraw_unbonded_kill(s: u32, ) -> Weight; + fn validate() -> Weight; + fn kick(k: u32, ) -> Weight; + fn nominate(n: u32, ) -> Weight; + fn chill() -> Weight; + fn set_payee() -> Weight; + fn update_payee() -> Weight; + fn set_controller() -> Weight; + fn set_validator_count() -> Weight; + fn force_no_eras() -> Weight; + fn force_new_era() -> Weight; + fn force_new_era_always() -> Weight; + fn set_invulnerables(v: u32, ) -> Weight; + fn deprecate_controller_batch(u: u32, ) -> Weight; + fn force_unstake(s: u32, ) -> Weight; + fn cancel_deferred_slash(s: u32, ) -> Weight; + fn payout_stakers_alive_staked(n: u32, ) -> Weight; + fn rebond(l: u32, ) -> Weight; + fn reap_stash(s: u32, ) -> Weight; + fn set_staking_configs_all_set() -> Weight; + fn set_staking_configs_all_remove() -> Weight; + fn chill_other() -> Weight; + fn force_apply_min_commission() -> Weight; + fn set_min_commission() -> Weight; + fn 
restore_ledger() -> Weight; + fn migrate_currency() -> Weight; + fn apply_slash() -> Weight; + fn process_offence_queue() -> Weight; + fn rc_on_offence(v: u32, ) -> Weight; + fn rc_on_session_report() -> Weight; +} + +/// Weights for `pallet_staking_async` using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + fn bond() -> Weight { + // Proof Size summary in bytes: + // Measured: `6750` + // Estimated: `4218` + // Minimum execution time: 167_701_000 picoseconds. 
+ Weight::from_parts(169_311_000, 4218) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(4_u64)) + } + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:3 w:3) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::Lock` (r:1 w:0) + /// Proof: `VoterList::Lock` (`max_values`: Some(1), `max_size`: Some(0), added: 495, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:2 w:2) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + fn bond_extra() -> Weight { + // Proof Size summary in bytes: + // Measured: `8079` + // Estimated: `8877` + // Minimum execution time: 9_824_554_000 picoseconds. 
+ Weight::from_parts(12_021_250_000, 8877) + .saturating_add(T::DbWeight::get().reads(10_u64)) + .saturating_add(T::DbWeight::get().writes(7_u64)) + } + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:1 w:0) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:0) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:3 w:3) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::Lock` (r:1 w:0) + /// Proof: `VoterList::Lock` (`max_values`: Some(1), `max_size`: Some(0), added: 495, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:2 w:2) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + fn unbond() -> Weight { + // Proof Size summary in bytes: + // Measured: `8259` + // Estimated: `8877` + // Minimum execution time: 12_169_771_000 picoseconds. 
+ Weight::from_parts(13_911_804_000, 8877) + .saturating_add(T::DbWeight::get().reads(13_u64)) + .saturating_add(T::DbWeight::get().writes(6_u64)) + } + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:0) + /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Agents` (r:1 w:0) + /// Proof: `DelegatedStaking::Agents` (`max_values`: None, `max_size`: Some(120), added: 2595, mode: `MaxEncodedLen`) + /// The range of component `s` is `[0, 100]`. + fn withdraw_unbonded_update(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `7406` + // Estimated: `4218` + // Minimum execution time: 187_501_000 picoseconds. 
+ Weight::from_parts(190_541_700, 4218) + // Standard Error: 4_787 + .saturating_add(Weight::from_parts(9_065, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(7_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::SlashingSpans` (r:1 w:1) + /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::VirtualStakers` (r:1 w:1) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::Lock` (r:1 w:0) + /// Proof: `VoterList::Lock` (`max_values`: Some(1), `max_size`: Some(0), added: 495, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: 
`MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Staking::SpanSlash` (r:0 w:100) + /// Proof: `Staking::SpanSlash` (`max_values`: None, `max_size`: Some(76), added: 2551, mode: `MaxEncodedLen`) + /// The range of component `s` is `[0, 100]`. + fn withdraw_unbonded_kill(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `8216 + s * (4 ±0)` + // Estimated: `11638 + s * (5 ±0)` + // Minimum execution time: 12_770_155_000 picoseconds. + Weight::from_parts(19_677_901_566, 11638) + // Standard Error: 12_737_389 + .saturating_add(Weight::from_parts(70_782_130, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(14_u64)) + .saturating_add(T::DbWeight::get().writes(11_u64)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) + .saturating_add(Weight::from_parts(0, 5).saturating_mul(s.into())) + } + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinValidatorBond` (r:1 w:0) + /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinCommission` (r:1 w:0) + /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + 
/// Storage: `Staking::Validators` (r:1 w:1) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxValidatorsCount` (r:1 w:0) + /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `VoterList::Lock` (r:1 w:0) + /// Proof: `VoterList::Lock` (`max_values`: Some(1), `max_size`: Some(0), added: 495, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:1 w:1) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForValidators` (r:1 w:1) + /// Proof: `Staking::CounterForValidators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn validate() -> Weight { + // Proof Size summary in bytes: + // Measured: `9938` + // Estimated: `4218` + // Minimum execution time: 179_051_000 picoseconds. 
+ Weight::from_parts(181_951_000, 4218) + .saturating_add(T::DbWeight::get().reads(12_u64)) + .saturating_add(T::DbWeight::get().writes(5_u64)) + } + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:128 w:128) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// The range of component `k` is `[1, 128]`. + fn kick(k: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `11184 + k * (1132 ±0)` + // Estimated: `4218 + k * (3033 ±0)` + // Minimum execution time: 125_011_000 picoseconds. + Weight::from_parts(139_775_678, 4218) + // Standard Error: 124_229 + .saturating_add(Weight::from_parts(20_473_526, 0).saturating_mul(k.into())) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(k.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) + .saturating_add(Weight::from_parts(0, 3033).saturating_mul(k.into())) + } + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:1 w:0) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxNominatorsCount` (r:1 w:0) + 
/// Proof: `Staking::MaxNominatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:17 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::Lock` (r:1 w:0) + /// Proof: `VoterList::Lock` (`max_values`: Some(1), `max_size`: Some(0), added: 495, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 16]`. + fn nominate(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `4898 + n * (71 ±0)` + // Estimated: `6248 + n * (2520 ±0)` + // Minimum execution time: 7_817_348_000 picoseconds. 
+ Weight::from_parts(12_966_679_697, 6248) + // Standard Error: 70_710_808 + .saturating_add(Weight::from_parts(108_404_036, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(13_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes(6_u64)) + .saturating_add(Weight::from_parts(0, 2520).saturating_mul(n.into())) + } + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::Lock` (r:1 w:0) + /// Proof: `VoterList::Lock` (`max_values`: Some(1), `max_size`: Some(0), added: 495, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn chill() -> Weight { + // Proof Size summary in bytes: + // Measured: `5073` + // Estimated: `6248` + // 
Minimum execution time: 4_771_225_000 picoseconds. + Weight::from_parts(11_721_807_000, 6248) + .saturating_add(T::DbWeight::get().reads(10_u64)) + .saturating_add(T::DbWeight::get().writes(6_u64)) + } + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + fn set_payee() -> Weight { + // Proof Size summary in bytes: + // Measured: `4060` + // Estimated: `4218` + // Minimum execution time: 68_151_000 picoseconds. + Weight::from_parts(69_181_000, 4218) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:1 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + fn update_payee() -> Weight { + // Proof Size summary in bytes: + // Measured: `5732` + // Estimated: `4218` + // Minimum execution time: 84_081_000 picoseconds. 
+ Weight::from_parts(86_531_000, 4218) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:2 w:2) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + fn set_controller() -> Weight { + // Proof Size summary in bytes: + // Measured: `5403` + // Estimated: `7446` + // Minimum execution time: 84_140_000 picoseconds. + Weight::from_parts(86_250_000, 7446) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) + } + /// Storage: `Staking::ValidatorCount` (r:0 w:1) + /// Proof: `Staking::ValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn set_validator_count() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_270_000 picoseconds. + Weight::from_parts(7_670_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Staking::ForceEra` (r:0 w:1) + /// Proof: `Staking::ForceEra` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + fn force_no_eras() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 27_090_000 picoseconds. + Weight::from_parts(28_140_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Staking::ForceEra` (r:0 w:1) + /// Proof: `Staking::ForceEra` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + fn force_new_era() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 27_520_000 picoseconds. 
+ Weight::from_parts(28_100_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Staking::ForceEra` (r:0 w:1) + /// Proof: `Staking::ForceEra` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + fn force_new_era_always() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 27_651_000 picoseconds. + Weight::from_parts(27_950_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Staking::Invulnerables` (r:0 w:1) + /// Proof: `Staking::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// The range of component `v` is `[0, 20]`. + fn set_invulnerables(v: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_460_000 picoseconds. + Weight::from_parts(8_198_000, 0) + // Standard Error: 6_827 + .saturating_add(Weight::from_parts(23_133, 0).saturating_mul(v.into())) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Staking::Ledger` (r:1502 w:1502) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:751 w:751) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:751 w:0) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// The range of component `u` is `[0, 751]`. + fn deprecate_controller_batch(u: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `99179 + u * (1300 ±0)` + // Estimated: `990 + u * (6456 ±0)` + // Minimum execution time: 13_550_000 picoseconds. 
+ Weight::from_parts(282_563_888, 990) + // Standard Error: 138_635 + .saturating_add(Weight::from_parts(64_878_892, 0).saturating_mul(u.into())) + .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(u.into()))) + .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(u.into()))) + .saturating_add(Weight::from_parts(0, 6456).saturating_mul(u.into())) + } + /// Storage: `Staking::SlashingSpans` (r:1 w:1) + /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:1) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::Lock` (r:1 w:0) + /// Proof: `VoterList::Lock` (`max_values`: Some(1), `max_size`: Some(0), added: 495, mode: `MaxEncodedLen`) + /// Storage: 
`VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Staking::SpanSlash` (r:0 w:100) + /// Proof: `Staking::SpanSlash` (`max_values`: None, `max_size`: Some(76), added: 2551, mode: `MaxEncodedLen`) + /// The range of component `s` is `[0, 100]`. + fn force_unstake(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `8216 + s * (4 ±0)` + // Estimated: `11638 + s * (5 ±0)` + // Minimum execution time: 13_563_169_000 picoseconds. + Weight::from_parts(18_031_564_700, 11638) + // Standard Error: 15_252_700 + .saturating_add(Weight::from_parts(88_205_545, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(14_u64)) + .saturating_add(T::DbWeight::get().writes(12_u64)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) + .saturating_add(Weight::from_parts(0, 5).saturating_mul(s.into())) + } + /// Storage: `Staking::UnappliedSlashes` (r:1000 w:1000) + /// Proof: `Staking::UnappliedSlashes` (`max_values`: None, `max_size`: Some(3231), added: 5706, mode: `MaxEncodedLen`) + /// The range of component `s` is `[1, 1000]`. + fn cancel_deferred_slash(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `671 + s * (122 ±0)` + // Estimated: `990 + s * (5706 ±0)` + // Minimum execution time: 39_790_000 picoseconds. 
+ Weight::from_parts(40_220_000, 990) + // Standard Error: 45_514 + .saturating_add(Weight::from_parts(13_403_172, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(s.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) + .saturating_add(Weight::from_parts(0, 5706).saturating_mul(s.into())) + } + /// Storage: `Staking::ErasStakersOverview` (r:1 w:0) + /// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasClaimedRewards` (r:1 w:1) + /// Proof: `Staking::ErasClaimedRewards` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasValidatorReward` (r:1 w:0) + /// Proof: `Staking::ErasValidatorReward` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:65 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:65 w:65) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:65 w:65) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:65 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:65 w:65) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStakersPaged` (r:1 w:0) + /// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`) + 
/// Storage: `Staking::ErasRewardPoints` (r:1 w:0) + /// Proof: `Staking::ErasRewardPoints` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ErasValidatorPrefs` (r:1 w:0) + /// Proof: `Staking::ErasValidatorPrefs` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:65 w:0) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 64]`. + fn payout_stakers_alive_staked(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `20405 + n * (3951 ±0)` + // Estimated: `24715 + n * (3920 ±72)` + // Minimum execution time: 9_680_492_000 picoseconds. + Weight::from_parts(19_051_608_733, 24715) + // Standard Error: 48_415_392 + .saturating_add(Weight::from_parts(426_518_637, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(13_u64)) + .saturating_add(T::DbWeight::get().reads((6_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes(4_u64)) + .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 3920).saturating_mul(n.into())) + } + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:0) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:3 w:3) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), 
added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::Lock` (r:1 w:0) + /// Proof: `VoterList::Lock` (`max_values`: Some(1), `max_size`: Some(0), added: 495, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:2 w:2) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// The range of component `l` is `[1, 32]`. + fn rebond(_l: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `8081 + l * (4 ±0)` + // Estimated: `8877` + // Minimum execution time: 10_942_970_000 picoseconds. + Weight::from_parts(17_242_952_692, 8877) + .saturating_add(T::DbWeight::get().reads(10_u64)) + .saturating_add(T::DbWeight::get().writes(6_u64)) + } + /// Storage: `Staking::VirtualStakers` (r:1 w:1) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::SlashingSpans` (r:1 w:1) + /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: 
`MaxEncodedLen`) + /// Storage: `VoterList::Lock` (r:1 w:0) + /// Proof: `VoterList::Lock` (`max_values`: Some(1), `max_size`: Some(0), added: 495, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Staking::SpanSlash` (r:0 w:100) + /// Proof: `Staking::SpanSlash` (`max_values`: None, `max_size`: Some(76), added: 2551, mode: `MaxEncodedLen`) + /// The range of component `s` is `[1, 100]`. + fn reap_stash(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `8216 + s * (4 ±0)` + // Estimated: `11682 + s * (4 ±0)` + // Minimum execution time: 14_166_693_000 picoseconds. 
+ Weight::from_parts(22_160_899_341, 11682) + // Standard Error: 9_189_865 + .saturating_add(Weight::from_parts(61_155_081, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(13_u64)) + .saturating_add(T::DbWeight::get().writes(12_u64)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) + .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) + } + /// Storage: `Staking::MinCommission` (r:0 w:1) + /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinValidatorBond` (r:0 w:1) + /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxValidatorsCount` (r:0 w:1) + /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxStakedRewards` (r:0 w:1) + /// Proof: `Staking::MaxStakedRewards` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `Staking::ChillThreshold` (r:0 w:1) + /// Proof: `Staking::ChillThreshold` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxNominatorsCount` (r:0 w:1) + /// Proof: `Staking::MaxNominatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:0 w:1) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + fn set_staking_configs_all_set() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 13_480_000 picoseconds. 
+ Weight::from_parts(13_920_000, 0) + .saturating_add(T::DbWeight::get().writes(7_u64)) + } + /// Storage: `Staking::MinCommission` (r:0 w:1) + /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinValidatorBond` (r:0 w:1) + /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxValidatorsCount` (r:0 w:1) + /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxStakedRewards` (r:0 w:1) + /// Proof: `Staking::MaxStakedRewards` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `Staking::ChillThreshold` (r:0 w:1) + /// Proof: `Staking::ChillThreshold` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxNominatorsCount` (r:0 w:1) + /// Proof: `Staking::MaxNominatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:0 w:1) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + fn set_staking_configs_all_remove() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 11_710_000 picoseconds. 
+ Weight::from_parts(12_190_000, 0) + .saturating_add(T::DbWeight::get().writes(7_u64)) + } + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::ChillThreshold` (r:1 w:0) + /// Proof: `Staking::ChillThreshold` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxNominatorsCount` (r:1 w:0) + /// Proof: `Staking::MaxNominatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:1 w:0) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `VoterList::Lock` (r:1 w:0) + /// Proof: `VoterList::Lock` (`max_values`: Some(1), `max_size`: Some(0), added: 495, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: 
`VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn chill_other() -> Weight { + // Proof Size summary in bytes: + // Measured: `5165` + // Estimated: `6248` + // Minimum execution time: 9_587_859_000 picoseconds. + Weight::from_parts(11_081_581_000, 6248) + .saturating_add(T::DbWeight::get().reads(13_u64)) + .saturating_add(T::DbWeight::get().writes(6_u64)) + } + /// Storage: `Staking::MinCommission` (r:1 w:0) + /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:1) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + fn force_apply_min_commission() -> Weight { + // Proof Size summary in bytes: + // Measured: `663` + // Estimated: `3510` + // Minimum execution time: 4_676_454_000 picoseconds. + Weight::from_parts(4_721_314_000, 3510) + .saturating_add(T::DbWeight::get().reads(2_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Staking::MinCommission` (r:0 w:1) + /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn set_min_commission() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_120_000 picoseconds. 
+ Weight::from_parts(7_490_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:0) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:0) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + fn restore_ledger() -> Weight { + // Proof Size summary in bytes: + // Measured: `7127` + // Estimated: `4764` + // Minimum execution time: 137_321_000 picoseconds. 
+ Weight::from_parts(138_981_000, 4764) + .saturating_add(T::DbWeight::get().reads(6_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + fn migrate_currency() -> Weight { + // Proof Size summary in bytes: + // Measured: `7039` + // Estimated: `4764` + // Minimum execution time: 204_921_000 picoseconds. 
+ Weight::from_parts(208_462_000, 4764) + .saturating_add(T::DbWeight::get().reads(6_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + /// Storage: `Staking::ActiveEra` (r:1 w:0) + /// Proof: `Staking::ActiveEra` (`max_values`: Some(1), `max_size`: Some(13), added: 508, mode: `MaxEncodedLen`) + /// Storage: `Staking::UnappliedSlashes` (r:1 w:1) + /// Proof: `Staking::UnappliedSlashes` (`max_values`: None, `max_size`: Some(3231), added: 5706, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:65 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:65 w:65) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::ReversePoolIdLookup` (r:65 w:0) + /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Agents` (r:65 w:65) + /// Proof: `DelegatedStaking::Agents` (`max_values`: None, `max_size`: Some(120), added: 2595, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:65 w:65) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:65 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:65 w:65) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + fn apply_slash() -> Weight { + // Proof Size summary in bytes: + // Measured: `216367` + // Estimated: `210810` + // Minimum execution time: 36_638_657_000 picoseconds. 
+ Weight::from_parts(39_174_206_000, 210810) + .saturating_add(T::DbWeight::get().reads(457_u64)) + .saturating_add(T::DbWeight::get().writes(261_u64)) + } + /// Storage: `Staking::ProcessingOffence` (r:1 w:1) + /// Proof: `Staking::ProcessingOffence` (`max_values`: Some(1), `max_size`: Some(85), added: 580, mode: `MaxEncodedLen`) + /// Storage: `Staking::OffenceQueueEras` (r:1 w:1) + /// Proof: `Staking::OffenceQueueEras` (`max_values`: Some(1), `max_size`: Some(9), added: 504, mode: `MaxEncodedLen`) + /// Storage: `Staking::OffenceQueue` (r:2 w:1) + /// Proof: `Staking::OffenceQueue` (`max_values`: None, `max_size`: Some(101), added: 2576, mode: `MaxEncodedLen`) + /// Storage: `Staking::SlashRewardFraction` (r:1 w:0) + /// Proof: `Staking::SlashRewardFraction` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStakersOverview` (r:1 w:0) + /// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStakersPaged` (r:1 w:0) + /// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::SlashingSpans` (r:65 w:65) + /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::NominatorSlashInEra` (r:64 w:64) + /// Proof: `Staking::NominatorSlashInEra` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) + /// Storage: `Staking::SpanSlash` (r:64 w:64) + /// Proof: `Staking::SpanSlash` (`max_values`: None, `max_size`: Some(76), added: 2551, mode: `MaxEncodedLen`) + /// Storage: `Staking::UnappliedSlashes` (r:0 w:1) + /// Proof: `Staking::UnappliedSlashes` (`max_values`: None, `max_size`: Some(3231), added: 5706, mode: `MaxEncodedLen`) + fn process_offence_queue() -> Weight { + // Proof Size summary in bytes: + // Measured: `5078` + // Estimated: `166943` + // Minimum execution time: 
43_344_626_000 picoseconds. + Weight::from_parts(50_068_965_000, 166943) + .saturating_add(T::DbWeight::get().reads(200_u64)) + .saturating_add(T::DbWeight::get().writes(197_u64)) + } + /// Storage: `Staking::ActiveEra` (r:1 w:0) + /// Proof: `Staking::ActiveEra` (`max_values`: Some(1), `max_size`: Some(13), added: 508, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStartSessionIndex` (r:1 w:0) + /// Proof: `Staking::ErasStartSessionIndex` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Staking::Invulnerables` (r:1 w:0) + /// Proof: `Staking::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStakersOverview` (r:500 w:0) + /// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) + /// Storage: `Staking::ValidatorSlashInEra` (r:500 w:500) + /// Proof: `Staking::ValidatorSlashInEra` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::OffenceQueue` (r:500 w:500) + /// Proof: `Staking::OffenceQueue` (`max_values`: None, `max_size`: Some(101), added: 2576, mode: `MaxEncodedLen`) + /// Storage: `Staking::OffenceQueueEras` (r:1 w:1) + /// Proof: `Staking::OffenceQueueEras` (`max_values`: Some(1), `max_size`: Some(9), added: 504, mode: `MaxEncodedLen`) + /// The range of component `v` is `[2, 500]`. + fn rc_on_offence(v: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `517 + v * (96 ±0)` + // Estimated: `3481 + v * (2576 ±0)` + // Minimum execution time: 4_330_171_000 picoseconds. 
+ Weight::from_parts(22_551_592_842, 3481) + // Standard Error: 13_047_521 + .saturating_add(Weight::from_parts(130_976_524, 0).saturating_mul(v.into())) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(v.into()))) + .saturating_add(T::DbWeight::get().writes(1_u64)) + .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(v.into()))) + .saturating_add(Weight::from_parts(0, 2576).saturating_mul(v.into())) + } + /// Storage: `Staking::ActiveEra` (r:1 w:1) + /// Proof: `Staking::ActiveEra` (`max_values`: Some(1), `max_size`: Some(13), added: 508, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasRewardPoints` (r:1 w:1) + /// Proof: `Staking::ErasRewardPoints` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasTotalStake` (r:1 w:0) + /// Proof: `Staking::ErasTotalStake` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxStakedRewards` (r:1 w:0) + /// Proof: `Staking::MaxStakedRewards` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `Staking::BondedEras` (r:1 w:1) + /// Proof: `Staking::BondedEras` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ForceEra` (r:1 w:0) + /// Proof: `Staking::ForceEra` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStartSessionIndex` (r:1 w:1) + /// Proof: `Staking::ErasStartSessionIndex` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Staking::VoterSnapshotStatus` (r:0 w:1) + /// Proof: `Staking::VoterSnapshotStatus` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: 
`Staking::ErasValidatorReward` (r:0 w:1) + /// Proof: `Staking::ErasValidatorReward` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Staking::NextElectionPage` (r:0 w:1) + /// Proof: `Staking::NextElectionPage` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ElectableStashes` (r:0 w:1) + /// Proof: `Staking::ElectableStashes` (`max_values`: Some(1), `max_size`: Some(32002), added: 32497, mode: `MaxEncodedLen`) + fn rc_on_session_report() -> Weight { + // Proof Size summary in bytes: + // Measured: `1067` + // Estimated: `4532` + // Minimum execution time: 12_249_491_000 picoseconds. + Weight::from_parts(13_956_694_000, 4532) + .saturating_add(T::DbWeight::get().reads(8_u64)) + .saturating_add(T::DbWeight::get().writes(8_u64)) + } +} + +// For backwards compatibility and tests. +impl WeightInfo for () { + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + fn bond() -> Weight { + // Proof Size summary in bytes: + // Measured: `6750` + // Estimated: `4218` + // Minimum execution time: 167_701_000 picoseconds. 
+ Weight::from_parts(169_311_000, 4218) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + } + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:3 w:3) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::Lock` (r:1 w:0) + /// Proof: `VoterList::Lock` (`max_values`: Some(1), `max_size`: Some(0), added: 495, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:2 w:2) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + fn bond_extra() -> Weight { + // Proof Size summary in bytes: + // Measured: `8079` + // Estimated: `8877` + // Minimum execution time: 9_824_554_000 picoseconds. 
+ Weight::from_parts(12_021_250_000, 8877) + .saturating_add(RocksDbWeight::get().reads(10_u64)) + .saturating_add(RocksDbWeight::get().writes(7_u64)) + } + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:1 w:0) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:0) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:3 w:3) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::Lock` (r:1 w:0) + /// Proof: `VoterList::Lock` (`max_values`: Some(1), `max_size`: Some(0), added: 495, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:2 w:2) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + fn unbond() -> Weight { + // Proof Size summary in bytes: + // Measured: `8259` + // Estimated: `8877` + // Minimum execution time: 12_169_771_000 picoseconds. 
+ Weight::from_parts(13_911_804_000, 8877) + .saturating_add(RocksDbWeight::get().reads(13_u64)) + .saturating_add(RocksDbWeight::get().writes(6_u64)) + } + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:0) + /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Agents` (r:1 w:0) + /// Proof: `DelegatedStaking::Agents` (`max_values`: None, `max_size`: Some(120), added: 2595, mode: `MaxEncodedLen`) + /// The range of component `s` is `[0, 100]`. + fn withdraw_unbonded_update(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `7406` + // Estimated: `4218` + // Minimum execution time: 187_501_000 picoseconds. 
+ Weight::from_parts(190_541_700, 4218) + // Standard Error: 4_787 + .saturating_add(Weight::from_parts(9_065, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(7_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::SlashingSpans` (r:1 w:1) + /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::VirtualStakers` (r:1 w:1) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::Lock` (r:1 w:0) + /// Proof: `VoterList::Lock` (`max_values`: Some(1), `max_size`: Some(0), added: 495, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: 
`MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Staking::SpanSlash` (r:0 w:100) + /// Proof: `Staking::SpanSlash` (`max_values`: None, `max_size`: Some(76), added: 2551, mode: `MaxEncodedLen`) + /// The range of component `s` is `[0, 100]`. + fn withdraw_unbonded_kill(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `8216 + s * (4 ±0)` + // Estimated: `11638 + s * (5 ±0)` + // Minimum execution time: 12_770_155_000 picoseconds. + Weight::from_parts(19_677_901_566, 11638) + // Standard Error: 12_737_389 + .saturating_add(Weight::from_parts(70_782_130, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(14_u64)) + .saturating_add(RocksDbWeight::get().writes(11_u64)) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) + .saturating_add(Weight::from_parts(0, 5).saturating_mul(s.into())) + } + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinValidatorBond` (r:1 w:0) + /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinCommission` (r:1 w:0) + /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: 
`MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:1) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxValidatorsCount` (r:1 w:0) + /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `VoterList::Lock` (r:1 w:0) + /// Proof: `VoterList::Lock` (`max_values`: Some(1), `max_size`: Some(0), added: 495, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:1 w:1) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForValidators` (r:1 w:1) + /// Proof: `Staking::CounterForValidators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn validate() -> Weight { + // Proof Size summary in bytes: + // Measured: `9938` + // Estimated: `4218` + // Minimum execution time: 179_051_000 picoseconds. 
+ Weight::from_parts(181_951_000, 4218) + .saturating_add(RocksDbWeight::get().reads(12_u64)) + .saturating_add(RocksDbWeight::get().writes(5_u64)) + } + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:128 w:128) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// The range of component `k` is `[1, 128]`. + fn kick(k: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `11184 + k * (1132 ±0)` + // Estimated: `4218 + k * (3033 ±0)` + // Minimum execution time: 125_011_000 picoseconds. + Weight::from_parts(139_775_678, 4218) + // Standard Error: 124_229 + .saturating_add(Weight::from_parts(20_473_526, 0).saturating_mul(k.into())) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(k.into()))) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(k.into()))) + .saturating_add(Weight::from_parts(0, 3033).saturating_mul(k.into())) + } + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:1 w:0) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxNominatorsCount` 
(r:1 w:0) + /// Proof: `Staking::MaxNominatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:17 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::Lock` (r:1 w:0) + /// Proof: `VoterList::Lock` (`max_values`: Some(1), `max_size`: Some(0), added: 495, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// The range of component `n` is `[1, 16]`. + fn nominate(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `4898 + n * (71 ±0)` + // Estimated: `6248 + n * (2520 ±0)` + // Minimum execution time: 7_817_348_000 picoseconds. 
+ Weight::from_parts(12_966_679_697, 6248) + // Standard Error: 70_710_808 + .saturating_add(Weight::from_parts(108_404_036, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(13_u64)) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(RocksDbWeight::get().writes(6_u64)) + .saturating_add(Weight::from_parts(0, 2520).saturating_mul(n.into())) + } + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::Lock` (r:1 w:0) + /// Proof: `VoterList::Lock` (`max_values`: Some(1), `max_size`: Some(0), added: 495, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn chill() -> Weight { + // Proof Size summary in bytes: + // Measured: `5073` + // Estimated: `6248` + 
// Minimum execution time: 4_771_225_000 picoseconds. + Weight::from_parts(11_721_807_000, 6248) + .saturating_add(RocksDbWeight::get().reads(10_u64)) + .saturating_add(RocksDbWeight::get().writes(6_u64)) + } + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + fn set_payee() -> Weight { + // Proof Size summary in bytes: + // Measured: `4060` + // Estimated: `4218` + // Minimum execution time: 68_151_000 picoseconds. + Weight::from_parts(69_181_000, 4218) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:1 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + fn update_payee() -> Weight { + // Proof Size summary in bytes: + // Measured: `5732` + // Estimated: `4218` + // Minimum execution time: 84_081_000 picoseconds. 
+ Weight::from_parts(86_531_000, 4218) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:2 w:2) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + fn set_controller() -> Weight { + // Proof Size summary in bytes: + // Measured: `5403` + // Estimated: `7446` + // Minimum execution time: 84_140_000 picoseconds. + Weight::from_parts(86_250_000, 7446) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + } + /// Storage: `Staking::ValidatorCount` (r:0 w:1) + /// Proof: `Staking::ValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn set_validator_count() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_270_000 picoseconds. + Weight::from_parts(7_670_000, 0) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `Staking::ForceEra` (r:0 w:1) + /// Proof: `Staking::ForceEra` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + fn force_no_eras() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 27_090_000 picoseconds. + Weight::from_parts(28_140_000, 0) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `Staking::ForceEra` (r:0 w:1) + /// Proof: `Staking::ForceEra` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + fn force_new_era() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 27_520_000 picoseconds. 
+ Weight::from_parts(28_100_000, 0) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `Staking::ForceEra` (r:0 w:1) + /// Proof: `Staking::ForceEra` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + fn force_new_era_always() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 27_651_000 picoseconds. + Weight::from_parts(27_950_000, 0) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `Staking::Invulnerables` (r:0 w:1) + /// Proof: `Staking::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// The range of component `v` is `[0, 20]`. + fn set_invulnerables(v: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_460_000 picoseconds. + Weight::from_parts(8_198_000, 0) + // Standard Error: 6_827 + .saturating_add(Weight::from_parts(23_133, 0).saturating_mul(v.into())) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `Staking::Ledger` (r:1502 w:1502) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:751 w:751) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:751 w:0) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// The range of component `u` is `[0, 751]`. + fn deprecate_controller_batch(u: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `99179 + u * (1300 ±0)` + // Estimated: `990 + u * (6456 ±0)` + // Minimum execution time: 13_550_000 picoseconds. 
+ Weight::from_parts(282_563_888, 990) + // Standard Error: 138_635 + .saturating_add(Weight::from_parts(64_878_892, 0).saturating_mul(u.into())) + .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(u.into()))) + .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(u.into()))) + .saturating_add(Weight::from_parts(0, 6456).saturating_mul(u.into())) + } + /// Storage: `Staking::SlashingSpans` (r:1 w:1) + /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:1) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::Lock` (r:1 w:0) + /// Proof: `VoterList::Lock` (`max_values`: Some(1), `max_size`: Some(0), added: 495, mode: `MaxEncodedLen`) + /// Storage: 
`VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Staking::SpanSlash` (r:0 w:100) + /// Proof: `Staking::SpanSlash` (`max_values`: None, `max_size`: Some(76), added: 2551, mode: `MaxEncodedLen`) + /// The range of component `s` is `[0, 100]`. + fn force_unstake(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `8216 + s * (4 ±0)` + // Estimated: `11638 + s * (5 ±0)` + // Minimum execution time: 13_563_169_000 picoseconds. + Weight::from_parts(18_031_564_700, 11638) + // Standard Error: 15_252_700 + .saturating_add(Weight::from_parts(88_205_545, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(14_u64)) + .saturating_add(RocksDbWeight::get().writes(12_u64)) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) + .saturating_add(Weight::from_parts(0, 5).saturating_mul(s.into())) + } + /// Storage: `Staking::UnappliedSlashes` (r:1000 w:1000) + /// Proof: `Staking::UnappliedSlashes` (`max_values`: None, `max_size`: Some(3231), added: 5706, mode: `MaxEncodedLen`) + /// The range of component `s` is `[1, 1000]`. + fn cancel_deferred_slash(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `671 + s * (122 ±0)` + // Estimated: `990 + s * (5706 ±0)` + // Minimum execution time: 39_790_000 picoseconds. 
+ Weight::from_parts(40_220_000, 990) + // Standard Error: 45_514 + .saturating_add(Weight::from_parts(13_403_172, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(s.into()))) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) + .saturating_add(Weight::from_parts(0, 5706).saturating_mul(s.into())) + } + /// Storage: `Staking::ErasStakersOverview` (r:1 w:0) + /// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasClaimedRewards` (r:1 w:1) + /// Proof: `Staking::ErasClaimedRewards` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasValidatorReward` (r:1 w:0) + /// Proof: `Staking::ErasValidatorReward` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:65 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:65 w:65) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:65 w:65) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:65 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:65 w:65) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStakersPaged` (r:1 w:0) + /// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`) 
+ /// Storage: `Staking::ErasRewardPoints` (r:1 w:0) + /// Proof: `Staking::ErasRewardPoints` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ErasValidatorPrefs` (r:1 w:0) + /// Proof: `Staking::ErasValidatorPrefs` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:65 w:0) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 64]`. + fn payout_stakers_alive_staked(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `20405 + n * (3951 ±0)` + // Estimated: `24715 + n * (3920 ±72)` + // Minimum execution time: 9_680_492_000 picoseconds. + Weight::from_parts(19_051_608_733, 24715) + // Standard Error: 48_415_392 + .saturating_add(Weight::from_parts(426_518_637, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(13_u64)) + .saturating_add(RocksDbWeight::get().reads((6_u64).saturating_mul(n.into()))) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 3920).saturating_mul(n.into())) + } + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:0) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:3 w:3) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: 
Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::Lock` (r:1 w:0) + /// Proof: `VoterList::Lock` (`max_values`: Some(1), `max_size`: Some(0), added: 495, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:2 w:2) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// The range of component `l` is `[1, 32]`. + fn rebond(_l: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `8081 + l * (4 ±0)` + // Estimated: `8877` + // Minimum execution time: 10_942_970_000 picoseconds. + Weight::from_parts(17_242_952_692, 8877) + .saturating_add(RocksDbWeight::get().reads(10_u64)) + .saturating_add(RocksDbWeight::get().writes(6_u64)) + } + /// Storage: `Staking::VirtualStakers` (r:1 w:1) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::SlashingSpans` (r:1 w:1) + /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 
499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::Lock` (r:1 w:0) + /// Proof: `VoterList::Lock` (`max_values`: Some(1), `max_size`: Some(0), added: 495, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:0 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Staking::SpanSlash` (r:0 w:100) + /// Proof: `Staking::SpanSlash` (`max_values`: None, `max_size`: Some(76), added: 2551, mode: `MaxEncodedLen`) + /// The range of component `s` is `[1, 100]`. + fn reap_stash(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `8216 + s * (4 ±0)` + // Estimated: `11682 + s * (4 ±0)` + // Minimum execution time: 14_166_693_000 picoseconds. 
+ Weight::from_parts(22_160_899_341, 11682) + // Standard Error: 9_189_865 + .saturating_add(Weight::from_parts(61_155_081, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(13_u64)) + .saturating_add(RocksDbWeight::get().writes(12_u64)) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) + .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) + } + /// Storage: `Staking::MinCommission` (r:0 w:1) + /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinValidatorBond` (r:0 w:1) + /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxValidatorsCount` (r:0 w:1) + /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxStakedRewards` (r:0 w:1) + /// Proof: `Staking::MaxStakedRewards` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `Staking::ChillThreshold` (r:0 w:1) + /// Proof: `Staking::ChillThreshold` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxNominatorsCount` (r:0 w:1) + /// Proof: `Staking::MaxNominatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:0 w:1) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + fn set_staking_configs_all_set() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 13_480_000 picoseconds. 
+ Weight::from_parts(13_920_000, 0) + .saturating_add(RocksDbWeight::get().writes(7_u64)) + } + /// Storage: `Staking::MinCommission` (r:0 w:1) + /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinValidatorBond` (r:0 w:1) + /// Proof: `Staking::MinValidatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxValidatorsCount` (r:0 w:1) + /// Proof: `Staking::MaxValidatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxStakedRewards` (r:0 w:1) + /// Proof: `Staking::MaxStakedRewards` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `Staking::ChillThreshold` (r:0 w:1) + /// Proof: `Staking::ChillThreshold` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxNominatorsCount` (r:0 w:1) + /// Proof: `Staking::MaxNominatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:0 w:1) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + fn set_staking_configs_all_remove() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 11_710_000 picoseconds. 
+ Weight::from_parts(12_190_000, 0) + .saturating_add(RocksDbWeight::get().writes(7_u64)) + } + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:1) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::ChillThreshold` (r:1 w:0) + /// Proof: `Staking::ChillThreshold` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxNominatorsCount` (r:1 w:0) + /// Proof: `Staking::MaxNominatorsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForNominators` (r:1 w:1) + /// Proof: `Staking::CounterForNominators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinNominatorBond` (r:1 w:0) + /// Proof: `Staking::MinNominatorBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `VoterList::Lock` (r:1 w:0) + /// Proof: `VoterList::Lock` (`max_values`: Some(1), `max_size`: Some(0), added: 495, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2 w:2) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:1 w:1) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::CounterForListNodes` (r:1 w:1) + /// Proof: 
`VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn chill_other() -> Weight { + // Proof Size summary in bytes: + // Measured: `5165` + // Estimated: `6248` + // Minimum execution time: 9_587_859_000 picoseconds. + Weight::from_parts(11_081_581_000, 6248) + .saturating_add(RocksDbWeight::get().reads(13_u64)) + .saturating_add(RocksDbWeight::get().writes(6_u64)) + } + /// Storage: `Staking::MinCommission` (r:1 w:0) + /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:1) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + fn force_apply_min_commission() -> Weight { + // Proof Size summary in bytes: + // Measured: `663` + // Estimated: `3510` + // Minimum execution time: 4_676_454_000 picoseconds. + Weight::from_parts(4_721_314_000, 3510) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `Staking::MinCommission` (r:0 w:1) + /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn set_min_commission() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_120_000 picoseconds. 
+ Weight::from_parts(7_490_000, 0) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:0) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:0) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:1) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + fn restore_ledger() -> Weight { + // Proof Size summary in bytes: + // Measured: `7127` + // Estimated: `4764` + // Minimum execution time: 137_321_000 picoseconds. 
+ Weight::from_parts(138_981_000, 4764) + .saturating_add(RocksDbWeight::get().reads(6_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + /// Storage: `Staking::VirtualStakers` (r:1 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `Balances::Locks` (r:1 w:1) + /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) + /// Storage: `Balances::Freezes` (r:1 w:0) + /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(85), added: 2560, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:1 w:1) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + fn migrate_currency() -> Weight { + // Proof Size summary in bytes: + // Measured: `7039` + // Estimated: `4764` + // Minimum execution time: 204_921_000 picoseconds. 
+ Weight::from_parts(208_462_000, 4764) + .saturating_add(RocksDbWeight::get().reads(6_u64)) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + } + /// Storage: `Staking::ActiveEra` (r:1 w:0) + /// Proof: `Staking::ActiveEra` (`max_values`: Some(1), `max_size`: Some(13), added: 508, mode: `MaxEncodedLen`) + /// Storage: `Staking::UnappliedSlashes` (r:1 w:1) + /// Proof: `Staking::UnappliedSlashes` (`max_values`: None, `max_size`: Some(3231), added: 5706, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:65 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:65 w:65) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(753), added: 3228, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::ReversePoolIdLookup` (r:65 w:0) + /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + /// Storage: `DelegatedStaking::Agents` (r:65 w:65) + /// Proof: `DelegatedStaking::Agents` (`max_values`: None, `max_size`: Some(120), added: 2595, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:65 w:65) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::VirtualStakers` (r:65 w:0) + /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + /// Storage: `Balances::Holds` (r:65 w:65) + /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(193), added: 2668, mode: `MaxEncodedLen`) + fn apply_slash() -> Weight { + // Proof Size summary in bytes: + // Measured: `216367` + // Estimated: `210810` + // Minimum execution time: 36_638_657_000 picoseconds. 
+ Weight::from_parts(39_174_206_000, 210810) + .saturating_add(RocksDbWeight::get().reads(457_u64)) + .saturating_add(RocksDbWeight::get().writes(261_u64)) + } + /// Storage: `Staking::ProcessingOffence` (r:1 w:1) + /// Proof: `Staking::ProcessingOffence` (`max_values`: Some(1), `max_size`: Some(85), added: 580, mode: `MaxEncodedLen`) + /// Storage: `Staking::OffenceQueueEras` (r:1 w:1) + /// Proof: `Staking::OffenceQueueEras` (`max_values`: Some(1), `max_size`: Some(9), added: 504, mode: `MaxEncodedLen`) + /// Storage: `Staking::OffenceQueue` (r:2 w:1) + /// Proof: `Staking::OffenceQueue` (`max_values`: None, `max_size`: Some(101), added: 2576, mode: `MaxEncodedLen`) + /// Storage: `Staking::SlashRewardFraction` (r:1 w:0) + /// Proof: `Staking::SlashRewardFraction` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStakersOverview` (r:1 w:0) + /// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStakersPaged` (r:1 w:0) + /// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::SlashingSpans` (r:65 w:65) + /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::NominatorSlashInEra` (r:64 w:64) + /// Proof: `Staking::NominatorSlashInEra` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) + /// Storage: `Staking::SpanSlash` (r:64 w:64) + /// Proof: `Staking::SpanSlash` (`max_values`: None, `max_size`: Some(76), added: 2551, mode: `MaxEncodedLen`) + /// Storage: `Staking::UnappliedSlashes` (r:0 w:1) + /// Proof: `Staking::UnappliedSlashes` (`max_values`: None, `max_size`: Some(3231), added: 5706, mode: `MaxEncodedLen`) + fn process_offence_queue() -> Weight { + // Proof Size summary in bytes: + // Measured: `5078` + // Estimated: `166943` + // Minimum execution 
time: 43_344_626_000 picoseconds. + Weight::from_parts(50_068_965_000, 166943) + .saturating_add(RocksDbWeight::get().reads(200_u64)) + .saturating_add(RocksDbWeight::get().writes(197_u64)) + } + /// Storage: `Staking::ActiveEra` (r:1 w:0) + /// Proof: `Staking::ActiveEra` (`max_values`: Some(1), `max_size`: Some(13), added: 508, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStartSessionIndex` (r:1 w:0) + /// Proof: `Staking::ErasStartSessionIndex` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Staking::Invulnerables` (r:1 w:0) + /// Proof: `Staking::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStakersOverview` (r:500 w:0) + /// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) + /// Storage: `Staking::ValidatorSlashInEra` (r:500 w:500) + /// Proof: `Staking::ValidatorSlashInEra` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::OffenceQueue` (r:500 w:500) + /// Proof: `Staking::OffenceQueue` (`max_values`: None, `max_size`: Some(101), added: 2576, mode: `MaxEncodedLen`) + /// Storage: `Staking::OffenceQueueEras` (r:1 w:1) + /// Proof: `Staking::OffenceQueueEras` (`max_values`: Some(1), `max_size`: Some(9), added: 504, mode: `MaxEncodedLen`) + /// The range of component `v` is `[2, 500]`. + fn rc_on_offence(v: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `517 + v * (96 ±0)` + // Estimated: `3481 + v * (2576 ±0)` + // Minimum execution time: 4_330_171_000 picoseconds. 
+ Weight::from_parts(22_551_592_842, 3481) + // Standard Error: 13_047_521 + .saturating_add(Weight::from_parts(130_976_524, 0).saturating_mul(v.into())) + .saturating_add(RocksDbWeight::get().reads(4_u64)) + .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(v.into()))) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(v.into()))) + .saturating_add(Weight::from_parts(0, 2576).saturating_mul(v.into())) + } + /// Storage: `Staking::ActiveEra` (r:1 w:1) + /// Proof: `Staking::ActiveEra` (`max_values`: Some(1), `max_size`: Some(13), added: 508, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasRewardPoints` (r:1 w:1) + /// Proof: `Staking::ErasRewardPoints` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::CurrentEra` (r:1 w:0) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasTotalStake` (r:1 w:0) + /// Proof: `Staking::ErasTotalStake` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Staking::MaxStakedRewards` (r:1 w:0) + /// Proof: `Staking::MaxStakedRewards` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `Staking::BondedEras` (r:1 w:1) + /// Proof: `Staking::BondedEras` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ForceEra` (r:1 w:0) + /// Proof: `Staking::ForceEra` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStartSessionIndex` (r:1 w:1) + /// Proof: `Staking::ErasStartSessionIndex` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Staking::VoterSnapshotStatus` (r:0 w:1) + /// Proof: `Staking::VoterSnapshotStatus` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: 
`Staking::ErasValidatorReward` (r:0 w:1) + /// Proof: `Staking::ErasValidatorReward` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Staking::NextElectionPage` (r:0 w:1) + /// Proof: `Staking::NextElectionPage` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ElectableStashes` (r:0 w:1) + /// Proof: `Staking::ElectableStashes` (`max_values`: Some(1), `max_size`: Some(32002), added: 32497, mode: `MaxEncodedLen`) + fn rc_on_session_report() -> Weight { + // Proof Size summary in bytes: + // Measured: `1067` + // Estimated: `4532` + // Minimum execution time: 12_249_491_000 picoseconds. + Weight::from_parts(13_956_694_000, 4532) + .saturating_add(RocksDbWeight::get().reads(8_u64)) + .saturating_add(RocksDbWeight::get().writes(8_u64)) + } +} diff --git a/substrate/frame/staking/src/lib.rs b/substrate/frame/staking/src/lib.rs index 75d0a76b7db7f..d265f547c40d0 100644 --- a/substrate/frame/staking/src/lib.rs +++ b/substrate/frame/staking/src/lib.rs @@ -160,12 +160,14 @@ //! //! ``` //! use pallet_staking::{self as staking}; +//! use frame_support::traits::RewardsReporter; //! //! #[frame_support::pallet(dev_mode)] //! pub mod pallet { //! use super::*; //! use frame_support::pallet_prelude::*; //! use frame_system::pallet_prelude::*; +//! # use frame_support::traits::RewardsReporter; //! //! #[pallet::pallet] //! pub struct Pallet(_); @@ -219,8 +221,8 @@ //! [here](https://research.web3.foundation/en/latest/polkadot/Token%20Economics.html#inflation-model). //! //! Total reward is split among validators and their nominators depending on the number of points -//! they received during the era. Points are added to a validator using -//! [`reward_by_ids`](Pallet::reward_by_ids). +//! they received during the era. Points are added to a validator using the method +//! [`frame_support::traits::RewardsReporter::reward_by_ids`] implemented by the [`Pallet`]. //! //! 
[`Pallet`] implements [`pallet_authorship::EventHandler`] to add reward points to block producer //! and block producer of referenced uncles. @@ -308,9 +310,8 @@ mod pallet; extern crate alloc; use alloc::{collections::btree_map::BTreeMap, vec, vec::Vec}; -use codec::{ - Decode, DecodeWithMemTracking, Encode, EncodeLike, HasCompact, Input, MaxEncodedLen, Output, -}; +use codec::{Decode, DecodeWithMemTracking, Encode, HasCompact, MaxEncodedLen}; +use frame_election_provider_support::ElectionProvider; use frame_support::{ defensive, defensive_assert, traits::{ @@ -329,7 +330,6 @@ use sp_runtime::{ use sp_staking::{ offence::{Offence, OffenceError, OffenceSeverity, ReportOffence}, EraIndex, ExposurePage, OnStakingUpdate, Page, PagedExposureMetadata, SessionIndex, - StakingAccount, }; pub use sp_staking::{Exposure, IndividualExposure, StakerStatus}; pub use weights::WeightInfo; @@ -350,9 +350,12 @@ macro_rules! log { }; } -/// Maximum number of winners (aka. active validators), as defined in the election provider of this -/// pallet. -pub type MaxWinnersOf = <::ElectionProvider as frame_election_provider_support::ElectionProviderBase>::MaxWinners; +/// Alias for the maximum number of winners (aka. active validators), as defined in by this pallet's +/// config. +pub type MaxWinnersOf = ::MaxValidatorSet; + +/// Alias for the maximum number of winners per page, as expected by the election provider. +pub type MaxWinnersPerPageOf

=

::MaxWinnersPerPage; /// Maximum number of nominations per nominator. pub type MaxNominationsOf = @@ -371,7 +374,7 @@ pub type NegativeImbalanceOf = type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; /// Information regarding the active era (era in used in session). -#[derive(Encode, Decode, RuntimeDebug, TypeInfo, MaxEncodedLen)] +#[derive(Encode, Decode, Clone, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct ActiveEraInfo { /// Index of era. pub index: EraIndex, @@ -1053,16 +1056,6 @@ impl Default for Forcing { } } -/// A `Convert` implementation that finds the stash of the given controller account, -/// if any. -pub struct StashOf(core::marker::PhantomData); - -impl Convert> for StashOf { - fn convert(controller: T::AccountId) -> Option { - StakingLedger::::paired_account(StakingAccount::Controller(controller)) - } -} - /// A typed conversion from stash account ID to the active exposure of nominators /// on that account. /// @@ -1081,69 +1074,54 @@ impl Convert } } -/// A type representing the presence of a validator. Encodes as a unit type. -pub type Existence = (); - -/// A converter type that returns `Some(())` if the validator exists in the current active era, -/// otherwise `None`. This serves as a lightweight presence check for validators. -pub struct ExistenceOf(core::marker::PhantomData); -impl Convert> for ExistenceOf { - fn convert(validator: T::AccountId) -> Option { - Validators::::contains_key(&validator).then_some(()) - } -} - -/// A compatibility wrapper type used to represent the presence of a validator in the current era. -/// Encodes as type [`Existence`] but can decode from legacy [`Exposure`] values for backward -/// compatibility. -#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, RuntimeDebug, TypeInfo, DecodeWithMemTracking)] -pub enum ExistenceOrLegacyExposure { - /// Validator exists in the current era. - Exists, - /// Legacy `Exposure` data, retained for decoding compatibility. 
- Exposure(Exposure), -} - -/// Converts a validator account ID to a Some([`ExistenceOrLegacyExposure::Exists`]) if the -/// validator exists in the current era, otherwise `None`. -pub struct ExistenceOrLegacyExposureOf(core::marker::PhantomData); - -impl Convert>>> - for ExistenceOrLegacyExposureOf -{ - fn convert( - validator: T::AccountId, - ) -> Option>> { - ActiveEra::::get() - .map(|active_era| ErasStakersOverview::::contains_key(active_era.index, &validator)) - .unwrap_or(false) - .then_some(ExistenceOrLegacyExposure::Exists) - } -} +/// Identify a validator with their default exposure. +/// +/// This type should not be used in a fresh runtime, instead use [`UnitIdentificationOf`]. +/// +/// In the past, a type called [`ExposureOf`] used to return the full exposure of a validator to +/// identify their exposure. This type is kept, marked as deprecated, for backwards compatibility of +/// external SDK users, but is no longer used in this repo. +/// +/// In the new model, we don't need to identify a validator with their full exposure anymore, and +/// therefore [`UnitIdentificationOf`] is perfectly fine. Yet, for runtimes that used to work with +/// [`ExposureOf`], we need to be able to decode old identification data, possibly stored in the +/// historical session pallet in older blocks. Therefore, this type is a good compromise, allowing +/// old exposure identifications to be decoded, and returning a few zero bytes +/// (`Exposure::default`) for any new identification request. 
+/// +/// A typical usage of this type is: +/// +/// ```ignore +/// impl pallet_session::historical::Config for Runtime { +/// type FullIdentification = sp_staking::Exposure; +/// type IdentificationOf = pallet_staking::DefaultExposureOf +/// } +/// ``` +pub struct DefaultExposureOf(core::marker::PhantomData); -impl Encode for ExistenceOrLegacyExposure -where - Exposure: Encode, +impl Convert>>> + for DefaultExposureOf { - fn encode_to(&self, dest: &mut T) { - match self { - ExistenceOrLegacyExposure::Exists => (), - ExistenceOrLegacyExposure::Exposure(exposure) => exposure.encode_to(dest), - } + fn convert(validator: T::AccountId) -> Option>> { + T::SessionInterface::validators() + .contains(&validator) + .then_some(Default::default()) } } -impl EncodeLike for ExistenceOrLegacyExposure where Exposure: Encode {} - -impl Decode for ExistenceOrLegacyExposure -where - Exposure: Decode, -{ - fn decode(input: &mut I) -> Result { - match input.remaining_len() { - Ok(Some(x)) if x > 0 => Ok(ExistenceOrLegacyExposure::Exposure(Decode::decode(input)?)), - _ => Ok(ExistenceOrLegacyExposure::Exists), - } +/// An identification type that signifies the existence of a validator by returning `Some(())`, and +/// `None` otherwise. Also see the documentation of [`DefaultExposureOf`] for more info. 
+/// +/// ```ignore +/// impl pallet_session::historical::Config for Runtime { +/// type FullIdentification = (); +/// type IdentificationOf = pallet_staking::UnitIdentificationOf +/// } +/// ``` +pub struct UnitIdentificationOf(core::marker::PhantomData); +impl Convert> for UnitIdentificationOf { + fn convert(validator: T::AccountId) -> Option<()> { + DefaultExposureOf::::convert(validator).map(|_default_exposure| ()) } } @@ -1432,52 +1410,3 @@ impl BenchmarkingConfig for TestBenchmarkingConfig { type MaxValidators = frame_support::traits::ConstU32<100>; type MaxNominators = frame_support::traits::ConstU32<100>; } - -#[cfg(test)] -mod test { - use crate::ExistenceOrLegacyExposure; - use codec::{Decode, Encode}; - use sp_staking::{Exposure, IndividualExposure}; - - #[test] - fn existence_encodes_decodes_correctly() { - let encoded_existence = ExistenceOrLegacyExposure::::Exists.encode(); - assert!(encoded_existence.is_empty()); - - // try decoding the existence - let decoded_existence = - ExistenceOrLegacyExposure::::decode(&mut encoded_existence.as_slice()) - .unwrap(); - assert!(matches!(decoded_existence, ExistenceOrLegacyExposure::Exists)); - - // check that round-trip encoding works - assert_eq!(encoded_existence, decoded_existence.encode()); - } - - #[test] - fn legacy_existence_encodes_decodes_correctly() { - let legacy_exposure = Exposure:: { - total: 1, - own: 2, - others: vec![IndividualExposure { who: 3, value: 4 }], - }; - - let encoded_legacy_exposure = legacy_exposure.encode(); - - // try decoding the legacy exposure - let decoded_legacy_exposure = - ExistenceOrLegacyExposure::::decode(&mut encoded_legacy_exposure.as_slice()) - .unwrap(); - assert_eq!( - decoded_legacy_exposure, - ExistenceOrLegacyExposure::Exposure(Exposure { - total: 1, - own: 2, - others: vec![IndividualExposure { who: 3, value: 4 }] - }) - ); - - // round trip encoding works - assert_eq!(encoded_legacy_exposure, decoded_legacy_exposure.encode()); - } -} diff --git 
a/substrate/frame/staking/src/mock.rs b/substrate/frame/staking/src/mock.rs index 49e8b9705f9f0..732c23562300c 100644 --- a/substrate/frame/staking/src/mock.rs +++ b/substrate/frame/staking/src/mock.rs @@ -20,25 +20,28 @@ use crate::{self as pallet_staking, *}; use frame_election_provider_support::{ bounds::{ElectionBounds, ElectionBoundsBuilder}, - onchain, SequentialPhragmen, VoteWeight, + onchain, BoundedSupports, SequentialPhragmen, Support, VoteWeight, }; use frame_support::{ assert_ok, derive_impl, ord_parameter_types, parameter_types, traits::{ ConstU64, EitherOfDiverse, FindAuthor, Get, Imbalance, OnUnbalanced, OneSessionHandler, + RewardsReporter, }, weights::constants::RocksDbWeight, }; use frame_system::{EnsureRoot, EnsureSignedBy}; +use sp_core::ConstBool; use sp_io; use sp_runtime::{curve::PiecewiseLinear, testing::UintAuthorityId, traits::Zero, BuildStorage}; use sp_staking::{ offence::{OffenceDetails, OnOffenceHandler}, - OnStakingUpdate, + OnStakingUpdate, StakingAccount, }; pub const INIT_TIMESTAMP: u64 = 30_000; pub const BLOCK_TIME: u64 = 1000; +pub(crate) const SINGLE_PAGE: u32 = 0; /// The AccountId alias in this test module. 
pub(crate) type AccountId = u64; @@ -143,7 +146,7 @@ impl pallet_session::Config for Test { type SessionHandler = (OtherSessionHandler,); type RuntimeEvent = RuntimeEvent; type ValidatorId = AccountId; - type ValidatorIdOf = crate::StashOf; + type ValidatorIdOf = sp_runtime::traits::ConvertInto; type NextSessionRotation = pallet_session::PeriodicSessions; type DisablingStrategy = pallet_session::disabling::UpToLimitWithReEnablingDisablingStrategy; @@ -151,8 +154,9 @@ impl pallet_session::Config for Test { } impl pallet_session::historical::Config for Test { - type FullIdentification = Existence; - type FullIdentificationOf = ExistenceOf; + type RuntimeEvent = RuntimeEvent; + type FullIdentification = (); + type FullIdentificationOf = crate::UnitIdentificationOf; } impl pallet_authorship::Config for Test { type FindAuthor = Author11; @@ -205,7 +209,7 @@ parameter_types! { pub static MaxExposurePageSize: u32 = 64; pub static MaxUnlockingChunks: u32 = 32; pub static RewardOnUnbalanceWasCalled: bool = false; - pub static MaxWinners: u32 = 100; + pub static MaxValidatorSet: u32 = 100; pub static ElectionsBounds: ElectionBounds = ElectionBoundsBuilder::default().build(); pub static AbsoluteMaxNominations: u32 = 16; } @@ -220,14 +224,20 @@ impl pallet_bags_list::Config for Test { type Score = VoteWeight; } +parameter_types! 
{ + pub static MaxBackersPerWinner: u32 = 256; + pub static MaxWinnersPerPage: u32 = MaxValidatorSet::get(); +} pub struct OnChainSeqPhragmen; impl onchain::Config for OnChainSeqPhragmen { type System = Test; type Solver = SequentialPhragmen; type DataProvider = Staking; type WeightInfo = (); - type MaxWinners = MaxWinners; + type MaxBackersPerWinner = MaxBackersPerWinner; + type MaxWinnersPerPage = MaxWinnersPerPage; type Bounds = ElectionsBounds; + type Sort = ConstBool; } pub struct MockReward {} @@ -285,6 +295,7 @@ impl crate::pallet::pallet::Config for Test { type EraPayout = ConvertCurve; type NextNewSession = Session; type MaxExposurePageSize = MaxExposurePageSize; + type MaxValidatorSet = MaxValidatorSet; type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; // NOTE: consider a macro and use `UseNominatorsAndValidatorsMap` as well. @@ -959,3 +970,13 @@ pub(crate) fn restrict(who: &AccountId) { pub(crate) fn remove_from_restrict_list(who: &AccountId) { RestrictedAccounts::mutate(|l| l.retain(|x| x != who)); } + +pub(crate) fn to_bounded_supports( + supports: Vec<(AccountId, Support)>, +) -> BoundedSupports< + AccountId, + <::ElectionProvider as ElectionProvider>::MaxWinnersPerPage, + <::ElectionProvider as ElectionProvider>::MaxBackersPerWinner, +> { + supports.try_into().unwrap() +} diff --git a/substrate/frame/staking/src/pallet/impls.rs b/substrate/frame/staking/src/pallet/impls.rs index a1cebec836629..c377ca77b5381 100644 --- a/substrate/frame/staking/src/pallet/impls.rs +++ b/substrate/frame/staking/src/pallet/impls.rs @@ -20,7 +20,7 @@ use frame_election_provider_support::{ bounds::{CountBound, SizeBound}, data_provider, BoundedSupportsOf, DataProviderBounds, ElectionDataProvider, ElectionProvider, - ScoreProvider, SortedListProvider, VoteWeight, VoterOf, + PageIndex, ScoreProvider, SortedListProvider, TryFromOtherBounds, VoteWeight, VoterOf, }; use frame_support::{ defensive, @@ -28,7 +28,8 @@ use 
frame_support::{ pallet_prelude::*, traits::{ Defensive, DefensiveSaturating, EstimateNextNewSession, Get, Imbalance, - InspectLockableCurrency, Len, LockableCurrency, OnUnbalanced, TryCollect, UnixTime, + InspectLockableCurrency, Len, LockableCurrency, OnUnbalanced, RewardsReporter, TryCollect, + UnixTime, }, weights::Weight, }; @@ -50,10 +51,9 @@ use sp_staking::{ use crate::{ asset, election_size_tracker::StaticTracker, log, slashing, weights::WeightInfo, ActiveEraInfo, - BalanceOf, EraInfo, EraPayout, Existence, ExistenceOrLegacyExposure, Exposure, Forcing, - IndividualExposure, LedgerIntegrityState, MaxNominationsOf, MaxWinnersOf, Nominations, - NominationsQuota, PositiveImbalanceOf, RewardDestination, SessionInterface, StakingLedger, - UnlockChunk, ValidatorPrefs, STAKING_ID, + BalanceOf, EraInfo, EraPayout, Exposure, Forcing, IndividualExposure, LedgerIntegrityState, + MaxNominationsOf, MaxWinnersOf, Nominations, NominationsQuota, PositiveImbalanceOf, + RewardDestination, SessionInterface, StakingLedger, UnlockChunk, ValidatorPrefs, STAKING_ID, }; use alloc::{boxed::Box, vec, vec::Vec}; @@ -641,24 +641,24 @@ impl Pallet { start_session_index: SessionIndex, is_genesis: bool, ) -> Option>> { - let election_result: BoundedVec<_, MaxWinnersOf> = if is_genesis { - let result = ::elect().map_err(|e| { - log!(warn, "genesis election provider failed due to {:?}", e); - Self::deposit_event(Event::StakingElectionFailed); - }); + let election_result = if is_genesis { + // This pallet only supports single page elections. + let result = ::elect(0) + .map_err(|e| { + log!(warn, "genesis election provider failed due to {:?}", e); + Self::deposit_event(Event::StakingElectionFailed); + }) + .ok()?; - result - .ok()? - .into_inner() - .try_into() - // both bounds checked in integrity test to be equal - .defensive_unwrap_or_default() + BoundedSupportsOf::::try_from_other_bounds(result).ok()? 
} else { - let result = ::elect().map_err(|e| { - log!(warn, "election provider failed due to {:?}", e); - Self::deposit_event(Event::StakingElectionFailed); - }); - result.ok()? + // This pallet only supports single page elections. + ::elect(0) + .map_err(|e| { + log!(warn, "election provider failed due to {:?}", e); + Self::deposit_event(Event::StakingElectionFailed); + }) + .ok()? }; let exposures = Self::collect_exposures(election_result); @@ -845,7 +845,7 @@ impl Pallet { /// relatively to their points. /// /// COMPLEXITY: Complexity is `number_of_validator_to_reward x current_elected_len`. - pub fn reward_by_ids(validators_points: impl IntoIterator) { + fn reward_by_ids(validators_points: impl IntoIterator) { if let Some(active_era) = ActiveEra::::get() { >::mutate(active_era.index, |era_rewards| { for (validator, points) in validators_points.into_iter() { @@ -1260,6 +1260,7 @@ impl Pallet { let active_era = ActiveEra::::get(); add_db_reads_writes(1, 0); if active_era.is_none() { + log!(warn, "🦹 on_offence: Active era not set -- not processing offence"); // This offence need not be re-submitted. return consumed_weight } @@ -1267,7 +1268,7 @@ impl Pallet { }; let active_era_start_session_index = ErasStartSessionIndex::::get(active_era) .unwrap_or_else(|| { - frame_support::print("Error: start_session_index must be set for current_era"); + log!(error, "🦹 on_offence: start_session_index must be set for current_era"); 0 }); add_db_reads_writes(1, 0); @@ -1286,7 +1287,10 @@ impl Pallet { match eras.iter().rev().find(|&(_, sesh)| sesh <= &slash_session) { Some((slash_era, _)) => *slash_era, // Before bonding period. defensive - should be filtered out. 
- None => return consumed_weight, + None => { + log!(warn, "🦹 on_offence: bonded era not found"); + return consumed_weight + }, } }; @@ -1498,7 +1502,10 @@ impl ElectionDataProvider for Pallet { Ok(ValidatorCount::::get()) } - fn electing_voters(bounds: DataProviderBounds) -> data_provider::Result>> { + fn electing_voters( + bounds: DataProviderBounds, + _page: PageIndex, + ) -> data_provider::Result>> { // This can never fail -- if `maybe_max_len` is `Some(_)` we handle it. let voters = Self::get_npos_voters(bounds); @@ -1510,7 +1517,10 @@ impl ElectionDataProvider for Pallet { Ok(voters) } - fn electable_targets(bounds: DataProviderBounds) -> data_provider::Result> { + fn electable_targets( + bounds: DataProviderBounds, + _page: PageIndex, + ) -> data_provider::Result> { let targets = Self::get_npos_targets(bounds); // We can't handle this case yet -- return an error. WIP to improve handling this case in @@ -1658,23 +1668,34 @@ impl pallet_session::SessionManager for Pallet { } } -impl - historical::SessionManager>> +impl historical::SessionManager>> for Pallet { fn new_session( new_index: SessionIndex, - ) -> Option>)>> { + ) -> Option>)>> { >::new_session(new_index).map(|validators| { - validators.into_iter().map(|v| (v, ExistenceOrLegacyExposure::Exists)).collect() + validators + .into_iter() + .map(|v| { + let exposure = Exposure::>::default(); + (v, exposure) + }) + .collect() }) } fn new_session_genesis( new_index: SessionIndex, - ) -> Option>)>> { + ) -> Option>)>> { >::new_session_genesis(new_index).map( |validators| { - validators.into_iter().map(|v| (v, ExistenceOrLegacyExposure::Exists)).collect() + validators + .into_iter() + .map(|v| { + let exposure = Exposure::>::default(); + (v, exposure) + }) + .collect() }, ) } @@ -1686,12 +1707,12 @@ impl } } -impl historical::SessionManager for Pallet { - fn new_session(new_index: SessionIndex) -> Option> { +impl historical::SessionManager for Pallet { + fn new_session(new_index: SessionIndex) -> Option> { 
>::new_session(new_index) .map(|validators| validators.into_iter().map(|v| (v, ())).collect()) } - fn new_session_genesis(new_index: SessionIndex) -> Option> { + fn new_session_genesis(new_index: SessionIndex) -> Option> { >::new_session_genesis(new_index) .map(|validators| validators.into_iter().map(|v| (v, ())).collect()) } @@ -1710,7 +1731,7 @@ where T: Config + pallet_authorship::Config + pallet_session::Config, { fn note_author(author: T::AccountId) { - Self::reward_by_ids(vec![(author, 20)]) + >::reward_by_ids(vec![(author, 20)]) } } @@ -1757,8 +1778,14 @@ where impl ScoreProvider for Pallet { type Score = VoteWeight; - fn score(who: &T::AccountId) -> Self::Score { - Self::weight_of(who) + fn score(who: &T::AccountId) -> Option { + Self::ledger(Stash(who.clone())) + .map(|l| l.active) + .map(|a| { + let issuance = asset::total_issuance::(); + T::CurrencyToVote::to_vote(a, issuance) + }) + .ok() } #[cfg(feature = "runtime-benchmarks")] @@ -1830,7 +1857,7 @@ impl SortedListProvider for UseValidatorsMap { } fn unsafe_regenerate( _: impl IntoIterator, - _: Box Self::Score>, + _: Box Option>, ) -> u32 { // nothing to do upon regenerate. 0 @@ -1849,6 +1876,10 @@ impl SortedListProvider for UseValidatorsMap { fn score_update_worst_case(_who: &T::AccountId, _is_increase: bool) -> Self::Score { unimplemented!() } + + fn lock() {} + + fn unlock() {} } /// A simple voter list implementation that does not require any additional pallets. Note, this @@ -1906,7 +1937,7 @@ impl SortedListProvider for UseNominatorsAndValidatorsM } fn unsafe_regenerate( _: impl IntoIterator, - _: Box Self::Score>, + _: Box Option>, ) -> u32 { // nothing to do upon regenerate. 
0 @@ -1930,6 +1961,10 @@ impl SortedListProvider for UseNominatorsAndValidatorsM fn score_update_worst_case(_who: &T::AccountId, _is_increase: bool) -> Self::Score { unimplemented!() } + + fn lock() {} + + fn unlock() {} } impl StakingInterface for Pallet { @@ -2033,7 +2068,7 @@ impl StakingInterface for Pallet { } fn election_ongoing() -> bool { - T::ElectionProvider::ongoing() + T::ElectionProvider::status().is_ok() } fn force_unstake(who: Self::AccountId) -> sp_runtime::DispatchResult { @@ -2163,6 +2198,12 @@ impl sp_staking::StakingUnchecked for Pallet { } } +impl RewardsReporter for Pallet { + fn reward_by_ids(validators_points: impl IntoIterator) { + Self::reward_by_ids(validators_points) + } +} + #[cfg(any(test, feature = "try-runtime"))] impl Pallet { pub(crate) fn do_try_state(_: BlockNumberFor) -> Result<(), TryRuntimeError> { @@ -2272,11 +2313,16 @@ impl Pallet { ::TargetList::count() == Validators::::count(), "wrong external count" ); + + let max_validators_bound = MaxWinnersOf::::get(); + let max_winners_per_page_bound = crate::MaxWinnersPerPageOf::::get(); + ensure!( - ValidatorCount::::get() <= - ::MaxWinners::get(), - Error::::TooManyValidators + max_validators_bound >= max_winners_per_page_bound, + "max validators should be higher than per page bounds" ); + + ensure!(ValidatorCount::::get() <= max_validators_bound, Error::::TooManyValidators); Ok(()) } diff --git a/substrate/frame/staking/src/pallet/mod.rs b/substrate/frame/staking/src/pallet/mod.rs index 37833b321372d..5ebf63cb34f42 100644 --- a/substrate/frame/staking/src/pallet/mod.rs +++ b/substrate/frame/staking/src/pallet/mod.rs @@ -19,9 +19,7 @@ use alloc::vec::Vec; use codec::Codec; -use frame_election_provider_support::{ - ElectionProvider, ElectionProviderBase, SortedListProvider, VoteWeight, -}; +use frame_election_provider_support::{ElectionProvider, SortedListProvider, VoteWeight}; use frame_support::{ pallet_prelude::*, traits::{ @@ -254,6 +252,10 @@ pub mod pallet { 
#[pallet::constant] type MaxExposurePageSize: Get; + /// The absolute maximum of winner validators this pallet should return. + #[pallet::constant] + type MaxValidatorSet: Get; + /// Something that provides a best-effort sorted list of voters aka electing nominators, /// used for NPoS election. /// @@ -375,6 +377,7 @@ pub mod pallet { type NextNewSession = (); type MaxExposurePageSize = ConstU32<64>; type MaxUnlockingChunks = ConstU32<32>; + type MaxValidatorSet = ConstU32<100>; type MaxControllersInDeprecationBatch = ConstU32<100>; type EventListeners = (); type Filter = Nothing; @@ -691,8 +694,7 @@ pub mod pallet { /// `[active_era - bounding_duration; active_era]` #[pallet::storage] #[pallet::unbounded] - pub(crate) type BondedEras = - StorageValue<_, Vec<(EraIndex, SessionIndex)>, ValueQuery>; + pub type BondedEras = StorageValue<_, Vec<(EraIndex, SessionIndex)>, ValueQuery>; /// All slashing events on validators, mapped by era to the highest slash proportion /// and slash value of the era. @@ -805,7 +807,8 @@ pub mod pallet { }); assert!( ValidatorCount::::get() <= - ::MaxWinners::get() + ::MaxWinnersPerPage::get() * + ::Pages::get() ); } @@ -988,8 +991,8 @@ pub mod pallet { // ensure election results are always bounded with the same value assert!( - ::MaxWinners::get() == - ::MaxWinners::get() + ::MaxWinnersPerPage::get() == + ::MaxWinnersPerPage::get() ); assert!( @@ -1534,10 +1537,8 @@ pub mod pallet { ensure_root(origin)?; // ensure new validator count does not exceed maximum winners // support by election provider. 
- ensure!( - new <= ::MaxWinners::get(), - Error::::TooManyValidators - ); + ensure!(new <= T::MaxValidatorSet::get(), Error::::TooManyValidators); + ValidatorCount::::put(new); Ok(()) } @@ -1558,10 +1559,7 @@ pub mod pallet { ensure_root(origin)?; let old = ValidatorCount::::get(); let new = old.checked_add(additional).ok_or(ArithmeticError::Overflow)?; - ensure!( - new <= ::MaxWinners::get(), - Error::::TooManyValidators - ); + ensure!(new <= T::MaxValidatorSet::get(), Error::::TooManyValidators); ValidatorCount::::put(new); Ok(()) @@ -1581,10 +1579,7 @@ pub mod pallet { let old = ValidatorCount::::get(); let new = old.checked_add(factor.mul_floor(old)).ok_or(ArithmeticError::Overflow)?; - ensure!( - new <= ::MaxWinners::get(), - Error::::TooManyValidators - ); + ensure!(new <= T::MaxValidatorSet::get(), Error::::TooManyValidators); ValidatorCount::::put(new); Ok(()) diff --git a/substrate/frame/staking/src/tests.rs b/substrate/frame/staking/src/tests.rs index a151f257d39a3..9badc2c2cee50 100644 --- a/substrate/frame/staking/src/tests.rs +++ b/substrate/frame/staking/src/tests.rs @@ -30,7 +30,7 @@ use frame_support::{ pallet_prelude::*, traits::{ fungible::Inspect, Currency, Get, InspectLockableCurrency, LockableCurrency, - ReservableCurrency, WithdrawReasons, + ReservableCurrency, RewardsReporter, WithdrawReasons, }, }; use mock::*; @@ -43,7 +43,7 @@ use sp_runtime::{ }; use sp_staking::{ offence::{OffenceDetails, OnOffenceHandler}, - SessionIndex, + SessionIndex, StakingAccount, }; use substrate_test_utils::assert_eq_uvec; @@ -2267,14 +2267,12 @@ fn bond_with_duplicate_vote_should_be_ignored_by_election_provider() { // winners should be 21 and 31. Otherwise this election is taking duplicates into // account. 
- let supports = ::ElectionProvider::elect().unwrap(); - assert_eq!( - supports, - vec![ - (21, Support { total: 1800, voters: vec![(21, 1000), (1, 400), (3, 400)] }), - (31, Support { total: 2200, voters: vec![(31, 1000), (1, 600), (3, 600)] }) - ], - ); + let supports = ::ElectionProvider::elect(SINGLE_PAGE).unwrap(); + let expected_supports = vec![ + (21, Support { total: 1800, voters: vec![(21, 1000), (1, 400), (3, 400)] }), + (31, Support { total: 2200, voters: vec![(31, 1000), (1, 600), (3, 600)] }), + ]; + assert_eq!(supports, to_bounded_supports(expected_supports)); }); } @@ -2319,14 +2317,13 @@ fn bond_with_duplicate_vote_should_be_ignored_by_election_provider_elected() { assert_ok!(Staking::nominate(RuntimeOrigin::signed(3), vec![21])); // winners should be 21 and 11. - let supports = ::ElectionProvider::elect().unwrap(); - assert_eq!( - supports, - vec![ - (11, Support { total: 1500, voters: vec![(11, 1000), (1, 500)] }), - (21, Support { total: 2500, voters: vec![(21, 1000), (1, 500), (3, 1000)] }) - ], - ); + let supports = ::ElectionProvider::elect(SINGLE_PAGE).unwrap(); + let expected_supports = vec![ + (11, Support { total: 1500, voters: vec![(11, 1000), (1, 500)] }), + (21, Support { total: 2500, voters: vec![(21, 1000), (1, 500), (3, 1000)] }), + ]; + + assert_eq!(supports, to_bounded_supports(expected_supports)); }); } @@ -4717,28 +4714,6 @@ fn restricted_accounts_can_only_withdraw() { }) } -#[test] -fn validator_existence_check() { - ExtBuilder::default().build_and_execute(|| { - mock::start_active_era(1); - - // Given: 11 is an active validator for Era 1 - assert!(ErasStakersOverview::::get(1, 11).is_some()); - - // And: 31 is not an active validator for Era 1 - assert!(ErasStakersOverview::::get(1, 31).is_none()); - - // Then: 11 converts to Exists. - assert_eq!( - ExistenceOrLegacyExposureOf::::convert(11), - Some(ExistenceOrLegacyExposure::Exists) - ); - - // And: 31 converts to None (Not Exists). 
- assert_eq!(ExistenceOrLegacyExposureOf::::convert(31), None); - }); -} - mod election_data_provider { use super::*; use frame_election_provider_support::ElectionDataProvider; @@ -4788,14 +4763,18 @@ mod election_data_provider { .build_and_execute(|| { // default bounds are unbounded. assert_ok!(::electing_voters( - DataProviderBounds::default() + DataProviderBounds::default(), + SINGLE_PAGE, )); assert_eq!(MinimumActiveStake::::get(), 10); // remove staker with lower bond by limiting the number of voters and check // `MinimumActiveStake` again after electing voters. let bounds = ElectionBoundsBuilder::default().voters_count(5.into()).build(); - assert_ok!(::electing_voters(bounds.voters)); + assert_ok!(::electing_voters( + bounds.voters, + SINGLE_PAGE + )); assert_eq!(MinimumActiveStake::::get(), 50); }); } @@ -4806,7 +4785,8 @@ mod election_data_provider { ExtBuilder::default().has_stakers(false).build_and_execute(|| { // default bounds are unbounded. assert_ok!(::electing_voters( - DataProviderBounds::default() + DataProviderBounds::default(), + SINGLE_PAGE, )); assert_eq!(::VoterList::count(), 0); assert_eq!(MinimumActiveStake::::get(), 0); @@ -4822,9 +4802,11 @@ mod election_data_provider { assert_ok!(Staking::nominate(RuntimeOrigin::signed(4), vec![1])); assert_eq!(::VoterList::count(), 5); - let voters_before = - ::electing_voters(DataProviderBounds::default()) - .unwrap(); + let voters_before = ::electing_voters( + DataProviderBounds::default(), + SINGLE_PAGE, + ) + .unwrap(); assert_eq!(MinimumActiveStake::::get(), 5); // update minimum nominator bond. @@ -4834,9 +4816,11 @@ mod election_data_provider { // lower than `MinNominatorBond`. assert_eq!(::VoterList::count(), 5); - let voters = - ::electing_voters(DataProviderBounds::default()) - .unwrap(); + let voters = ::electing_voters( + DataProviderBounds::default(), + SINGLE_PAGE, + ) + .unwrap(); assert_eq!(voters_before, voters); // minimum active stake is lower than `MinNominatorBond`. 
@@ -4854,6 +4838,7 @@ mod election_data_provider { assert_eq!(Staking::weight_of(&101), 500); let voters = ::electing_voters( DataProviderBounds::default(), + SINGLE_PAGE, ) .unwrap(); assert_eq!(voters.len(), 5); @@ -4869,6 +4854,7 @@ mod election_data_provider { let voters = ::electing_voters( DataProviderBounds::default(), + SINGLE_PAGE, ) .unwrap(); // number of returned voters decreases since ledger entry of stash 101 is now @@ -4890,7 +4876,8 @@ mod election_data_provider { ExtBuilder::default().nominate(false).build_and_execute(|| { // default bounds are unbounded. assert!(>::iter().map(|(x, _)| x).all(|v| Staking::electing_voters( - DataProviderBounds::default() + DataProviderBounds::default(), + SINGLE_PAGE, ) .unwrap() .into_iter() @@ -4944,12 +4931,15 @@ mod election_data_provider { // 11 is taken; // we finish since the 2x limit is reached. assert_eq!( - Staking::electing_voters(bounds_builder.voters_count(2.into()).build().voters) - .unwrap() - .iter() - .map(|(stash, _, _)| stash) - .copied() - .collect::>(), + Staking::electing_voters( + bounds_builder.voters_count(2.into()).build().voters, + SINGLE_PAGE, + ) + .unwrap() + .iter() + .map(|(stash, _, _)| stash) + .copied() + .collect::>(), vec![11], ); }); @@ -4967,32 +4957,42 @@ mod election_data_provider { // if voter count limit is less.. assert_eq!( - Staking::electing_voters(bounds_builder.voters_count(1.into()).build().voters) - .unwrap() - .len(), + Staking::electing_voters( + bounds_builder.voters_count(1.into()).build().voters, + SINGLE_PAGE, + ) + .unwrap() + .len(), 1 ); // if voter count limit is equal.. assert_eq!( - Staking::electing_voters(bounds_builder.voters_count(5.into()).build().voters) - .unwrap() - .len(), + Staking::electing_voters( + bounds_builder.voters_count(5.into()).build().voters, + SINGLE_PAGE, + ) + .unwrap() + .len(), 5 ); // if voter count limit is more. 
assert_eq!( - Staking::electing_voters(bounds_builder.voters_count(55.into()).build().voters) - .unwrap() - .len(), + Staking::electing_voters( + bounds_builder.voters_count(55.into()).build().voters, + SINGLE_PAGE, + ) + .unwrap() + .len(), 5 ); // if target count limit is more.. assert_eq!( Staking::electable_targets( - bounds_builder.targets_count(6.into()).build().targets + bounds_builder.targets_count(6.into()).build().targets, + SINGLE_PAGE, ) .unwrap() .len(), @@ -5002,7 +5002,8 @@ mod election_data_provider { // if target count limit is equal.. assert_eq!( Staking::electable_targets( - bounds_builder.targets_count(4.into()).build().targets + bounds_builder.targets_count(4.into()).build().targets, + SINGLE_PAGE, ) .unwrap() .len(), @@ -5012,7 +5013,8 @@ mod election_data_provider { // if target limit count is less, then we return an error. assert_eq!( Staking::electable_targets( - bounds_builder.targets_count(1.into()).build().targets + bounds_builder.targets_count(1.into()).build().targets, + SINGLE_PAGE, ) .unwrap_err(), "Target snapshot too big" @@ -5025,25 +5027,25 @@ mod election_data_provider { ExtBuilder::default().build_and_execute(|| { // voters: set size bounds that allows only for 1 voter. let bounds = ElectionBoundsBuilder::default().voters_size(26.into()).build(); - let elected = Staking::electing_voters(bounds.voters).unwrap(); + let elected = Staking::electing_voters(bounds.voters, SINGLE_PAGE).unwrap(); assert!(elected.encoded_size() == 26 as usize); let prev_len = elected.len(); // larger size bounds means more quota for voters. let bounds = ElectionBoundsBuilder::default().voters_size(100.into()).build(); - let elected = Staking::electing_voters(bounds.voters).unwrap(); + let elected = Staking::electing_voters(bounds.voters, SINGLE_PAGE).unwrap(); assert!(elected.encoded_size() <= 100 as usize); assert!(elected.len() > 1 && elected.len() > prev_len); // targets: set size bounds that allows for only one target to fit in the snapshot. 
let bounds = ElectionBoundsBuilder::default().targets_size(10.into()).build(); - let elected = Staking::electable_targets(bounds.targets).unwrap(); + let elected = Staking::electable_targets(bounds.targets, SINGLE_PAGE).unwrap(); assert!(elected.encoded_size() == 9 as usize); let prev_len = elected.len(); // larger size bounds means more space for targets. let bounds = ElectionBoundsBuilder::default().targets_size(100.into()).build(); - let elected = Staking::electable_targets(bounds.targets).unwrap(); + let elected = Staking::electable_targets(bounds.targets, SINGLE_PAGE).unwrap(); assert!(elected.encoded_size() <= 100 as usize); assert!(elected.len() > 1 && elected.len() > prev_len); }); @@ -5087,7 +5089,7 @@ mod election_data_provider { // even through 61 has nomination quota of 2 at the time of the election, all the // nominations (5) will be used. assert_eq!( - Staking::electing_voters(DataProviderBounds::default()) + Staking::electing_voters(DataProviderBounds::default(), SINGLE_PAGE) .unwrap() .iter() .map(|(stash, _, targets)| (*stash, targets.len())) @@ -5111,7 +5113,7 @@ mod election_data_provider { // nominations of controller 70 won't be added due to voter size limit exceeded. let bounds = ElectionBoundsBuilder::default().voters_size(100.into()).build(); assert_eq!( - Staking::electing_voters(bounds.voters) + Staking::electing_voters(bounds.voters, SINGLE_PAGE) .unwrap() .iter() .map(|(stash, _, targets)| (*stash, targets.len())) @@ -5128,7 +5130,7 @@ mod election_data_provider { // include the electing voters of 70. 
let bounds = ElectionBoundsBuilder::default().voters_size(1_000.into()).build(); assert_eq!( - Staking::electing_voters(bounds.voters) + Staking::electing_voters(bounds.voters, SINGLE_PAGE) .unwrap() .iter() .map(|(stash, _, targets)| (*stash, targets.len())) @@ -5697,7 +5699,7 @@ fn change_of_absolute_max_nominations() { let bounds = DataProviderBounds::default(); // 3 validators and 3 nominators - assert_eq!(Staking::electing_voters(bounds).unwrap().len(), 3 + 3); + assert_eq!(Staking::electing_voters(bounds, SINGLE_PAGE).unwrap().len(), 3 + 3); // abrupt change from 16 to 4, everyone should be fine. AbsoluteMaxNominations::set(4); @@ -5708,7 +5710,7 @@ fn change_of_absolute_max_nominations() { .collect::>(), vec![(101, 2), (71, 3), (61, 1)] ); - assert_eq!(Staking::electing_voters(bounds).unwrap().len(), 3 + 3); + assert_eq!(Staking::electing_voters(bounds, SINGLE_PAGE,).unwrap().len(), 3 + 3); // No one can be chilled on account of non-decodable keys. for k in Nominators::::iter_keys() { @@ -5727,7 +5729,7 @@ fn change_of_absolute_max_nominations() { .collect::>(), vec![(101, 2), (71, 3), (61, 1)] ); - assert_eq!(Staking::electing_voters(bounds).unwrap().len(), 3 + 3); + assert_eq!(Staking::electing_voters(bounds, SINGLE_PAGE,).unwrap().len(), 3 + 3); // As before, no one can be chilled on account of non-decodable keys. for k in Nominators::::iter_keys() { @@ -5761,7 +5763,7 @@ fn change_of_absolute_max_nominations() { // but its value cannot be decoded and default is returned. 
assert!(Nominators::::get(71).is_none()); - assert_eq!(Staking::electing_voters(bounds).unwrap().len(), 3 + 2); + assert_eq!(Staking::electing_voters(bounds, SINGLE_PAGE,).unwrap().len(), 3 + 2); assert!(Nominators::::contains_key(101)); // abrupt change from 2 to 1, this should cause some nominators to be non-decodable, and @@ -5785,7 +5787,7 @@ fn change_of_absolute_max_nominations() { assert!(Nominators::::contains_key(61)); assert!(Nominators::::get(71).is_none()); assert!(Nominators::::get(61).is_some()); - assert_eq!(Staking::electing_voters(bounds).unwrap().len(), 3 + 1); + assert_eq!(Staking::electing_voters(bounds, SINGLE_PAGE,).unwrap().len(), 3 + 1); // now one of them can revive themselves by re-nominating to a proper value. assert_ok!(Staking::nominate(RuntimeOrigin::signed(71), vec![1])); @@ -5828,7 +5830,10 @@ fn nomination_quota_max_changes_decoding() { vec![(70, 3), (101, 2), (50, 4), (30, 4), (60, 1)] ); // 4 validators and 4 nominators - assert_eq!(Staking::electing_voters(unbonded_election).unwrap().len(), 4 + 4); + assert_eq!( + Staking::electing_voters(unbonded_election, SINGLE_PAGE,).unwrap().len(), + 4 + 4 + ); }); } @@ -6229,7 +6234,8 @@ fn reducing_max_unlocking_chunks_abrupt() { #[test] fn cannot_set_unsupported_validator_count() { ExtBuilder::default().build_and_execute(|| { - MaxWinners::set(50); + MaxValidatorSet::set(50); + MaxWinnersPerPage::set(50); // set validator count works assert_ok!(Staking::set_validator_count(RuntimeOrigin::root(), 30)); assert_ok!(Staking::set_validator_count(RuntimeOrigin::root(), 50)); @@ -6244,7 +6250,8 @@ fn cannot_set_unsupported_validator_count() { #[test] fn increase_validator_count_errors() { ExtBuilder::default().build_and_execute(|| { - MaxWinners::set(50); + MaxValidatorSet::set(50); + MaxWinnersPerPage::set(50); assert_ok!(Staking::set_validator_count(RuntimeOrigin::root(), 40)); // increase works @@ -6262,7 +6269,8 @@ fn increase_validator_count_errors() { #[test] fn 
scale_validator_count_errors() { ExtBuilder::default().build_and_execute(|| { - MaxWinners::set(50); + MaxValidatorSet::set(50); + MaxWinnersPerPage::set(50); assert_ok!(Staking::set_validator_count(RuntimeOrigin::root(), 20)); // scale value works @@ -7153,6 +7161,7 @@ mod staking_unchecked { } mod ledger { use super::*; + use sp_staking::StakingAccount; #[test] fn paired_account_works() { @@ -7440,8 +7449,8 @@ mod ledger { assert_eq!(ledger_updated.stash, stash); // Check `active` and `total` values match the original ledger set by controller. - assert_eq!(ledger_updated.active, (10 + ctlr).into()); - assert_eq!(ledger_updated.total, (10 + ctlr).into()); + assert_eq!(ledger_updated.active, (10 + ctlr) as Balance); + assert_eq!(ledger_updated.total, (10 + ctlr) as Balance); } }) } @@ -8363,6 +8372,7 @@ mod validator_disabling_integration { session_events(), vec![ SessionEvent::NewSession { session_index: 1 }, + SessionEvent::NewQueued, SessionEvent::NewSession { session_index: 2 }, SessionEvent::NewSession { session_index: 3 }, SessionEvent::ValidatorDisabled { validator: 11 } diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/origin.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/origin.rs index 7883541b90191..dc5fe5beef138 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/origin.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/origin.rs @@ -107,17 +107,6 @@ pub fn expand_outer_origin( filter: #scrate::__private::Rc<#scrate::__private::Box::RuntimeCall) -> bool>>, } - #[cfg(not(feature = "std"))] - impl core::fmt::Debug for RuntimeOrigin { - fn fmt( - &self, - fmt: &mut core::fmt::Formatter, - ) -> core::result::Result<(), core::fmt::Error> { - fmt.write_str("") - } - } - - #[cfg(feature = "std")] impl core::fmt::Debug for RuntimeOrigin { fn fmt( &self, @@ -201,7 +190,7 @@ pub fn expand_outer_origin( #[derive( Clone, PartialEq, Eq, - #scrate::__private::RuntimeDebug, 
+ #scrate::__private::Debug, #scrate::__private::codec::Encode, #scrate::__private::codec::Decode, #scrate::__private::codec::DecodeWithMemTracking, diff --git a/substrate/frame/support/src/dispatch.rs b/substrate/frame/support/src/dispatch.rs index 62bb2e35d789c..ed7d040d737b2 100644 --- a/substrate/frame/support/src/dispatch.rs +++ b/substrate/frame/support/src/dispatch.rs @@ -29,7 +29,7 @@ use sp_runtime::{ traits::{ Dispatchable, ExtensionPostDispatchWeightHandler, RefundWeight, TransactionExtension, }, - DispatchError, RuntimeDebug, + DispatchError, }; use sp_weights::Weight; @@ -73,15 +73,7 @@ pub trait CheckIfFeeless { /// Origin for the System pallet. #[derive( - PartialEq, - Eq, - Clone, - RuntimeDebug, - Encode, - Decode, - DecodeWithMemTracking, - TypeInfo, - MaxEncodedLen, + PartialEq, Eq, Clone, Debug, Encode, Decode, DecodeWithMemTracking, TypeInfo, MaxEncodedLen, )] pub enum RawOrigin { /// The system itself ordained this dispatch to happen: this is the highest privilege level. @@ -151,9 +143,7 @@ pub trait PaysFee { } /// Explicit enum to denote if a transaction pays fee or not. -#[derive( - Clone, Copy, Eq, PartialEq, RuntimeDebug, Encode, Decode, DecodeWithMemTracking, TypeInfo, -)] +#[derive(Clone, Copy, Eq, PartialEq, Debug, Encode, Decode, DecodeWithMemTracking, TypeInfo)] pub enum Pays { /// Transactor will pay related fees. Yes, @@ -188,9 +178,7 @@ impl From for Pays { /// [DispatchClass::all] and [DispatchClass::non_mandatory] helper functions. #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] #[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] -#[derive( - PartialEq, Eq, Clone, Copy, Encode, Decode, DecodeWithMemTracking, RuntimeDebug, TypeInfo, -)] +#[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, DecodeWithMemTracking, Debug, TypeInfo)] pub enum DispatchClass { /// A normal dispatch. 
Normal, @@ -256,7 +244,7 @@ impl<'a> OneOrMany for &'a [DispatchClass] { } /// A bundle of static information collected from the `#[pallet::weight]` attributes. -#[derive(Clone, Copy, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo)] +#[derive(Clone, Copy, Eq, PartialEq, Default, Debug, Encode, Decode, TypeInfo)] pub struct DispatchInfo { /// Weight of this transaction's call. pub call_weight: Weight, @@ -312,16 +300,7 @@ pub fn extract_actual_pays_fee(result: &DispatchResultWithPostInfo, info: &Dispa /// Weight information that is only available post dispatch. /// NOTE: This can only be used to reduce the weight or fee, not increase it. #[derive( - Clone, - Copy, - Eq, - PartialEq, - Default, - RuntimeDebug, - Encode, - Decode, - DecodeWithMemTracking, - TypeInfo, + Clone, Copy, Eq, PartialEq, Default, Debug, Encode, Decode, DecodeWithMemTracking, TypeInfo, )] pub struct PostDispatchInfo { /// Actual weight consumed by a call or `None` which stands for the worst case static weight. diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs index 747e747231114..0eaa2a13edb2a 100644 --- a/substrate/frame/support/src/lib.rs +++ b/substrate/frame/support/src/lib.rs @@ -447,6 +447,12 @@ macro_rules! parameter_types_impl_thread_local { Self::set($value); current } + + /// Kill/reset the value to whatever was set at first. + #[allow(unused)] + pub fn reset() { + Self::set($value); + } } )* } @@ -850,7 +856,6 @@ macro_rules! assert_error_encoded_size { /// /// Returns the original result of the closure. #[macro_export] -#[cfg(feature = "experimental")] macro_rules! hypothetically { ( $e:expr ) => { $crate::storage::transactional::with_transaction(|| -> $crate::__private::TransactionOutcome> { @@ -864,7 +869,6 @@ macro_rules! hypothetically { /// /// Reverts any storage changes made by the closure. #[macro_export] -#[cfg(feature = "experimental")] macro_rules! hypothetically_ok { ($e:expr $(, $args:expr)* $(,)?) 
=> { $crate::assert_ok!($crate::hypothetically!($e) $(, $args)*); diff --git a/substrate/frame/support/src/traits.rs b/substrate/frame/support/src/traits.rs index f95f73c27ab5e..bf593b411380a 100644 --- a/substrate/frame/support/src/traits.rs +++ b/substrate/frame/support/src/traits.rs @@ -62,8 +62,9 @@ pub use misc::{ DefensiveSaturating, DefensiveTruncateFrom, EnsureInherentsAreFirst, EqualPrivilegeOnly, EstimateCallFee, ExecuteBlock, ExtrinsicCall, Get, GetBacking, GetDefault, HandleLifetime, InherentBuilder, IsInherent, IsSubType, IsType, Len, OffchainWorker, OnKilledAccount, - OnNewAccount, PrivilegeCmp, SameOrOther, SignedTransactionBuilder, Time, TryCollect, TryDrop, - TypedGet, UnixTime, VariantCount, VariantCountOf, WrapperKeepOpaque, WrapperOpaque, + OnNewAccount, PrivilegeCmp, RewardsReporter, SameOrOther, SignedTransactionBuilder, Time, + TryCollect, TryDrop, TypedGet, UnixTime, VariantCount, VariantCountOf, WrapperKeepOpaque, + WrapperOpaque, }; #[allow(deprecated)] pub use misc::{PreimageProvider, PreimageRecipient}; diff --git a/substrate/frame/support/src/traits/misc.rs b/substrate/frame/support/src/traits/misc.rs index f91fc7f586415..4defe851f882e 100644 --- a/substrate/frame/support/src/traits/misc.rs +++ b/substrate/frame/support/src/traits/misc.rs @@ -1017,6 +1017,13 @@ impl, const T: u32> EstimateCallFee for } } +#[cfg(feature = "std")] +impl, const T: u64> EstimateCallFee for ConstU64 { + fn estimate_call_fee(_: &Call, _: crate::dispatch::PostDispatchInfo) -> Balance { + (T as u32).into() + } +} + /// A wrapper for any type `T` which implement encode/decode in a way compatible with `Vec`. /// /// The encoding is the encoding of `T` prepended with the compact encoding of its size in bytes. 
@@ -1264,6 +1271,13 @@ pub trait AccountTouch { fn touch(asset: AssetId, who: &AccountId, depositor: &AccountId) -> DispatchResult; } +/// Trait for reporting additional validator reward points +pub trait RewardsReporter { + /// The input is an iterator of tuples of validator account IDs and the amount of points they + /// should be rewarded. + fn reward_by_ids(validators_points: impl IntoIterator); +} + #[cfg(test)] mod test { use super::*; diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr index b468b8647ca19..6c213ed477e64 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr @@ -326,7 +326,7 @@ error[E0277]: the trait bound `Runtime: Config` is not satisfied 28 | | } | |_^ the trait `Config` is not implemented for `Runtime` | - = note: this error originates in the derive macro `self::sp_api_hidden_includes_construct_runtime::hidden_include::__private::RuntimeDebug` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the derive macro `self::sp_api_hidden_includes_construct_runtime::hidden_include::__private::Debug` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:26:11 diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs index c3c2cc84f6d97..5596f3e6c5ad7 100644 --- a/substrate/frame/system/src/lib.rs +++ b/substrate/frame/system/src/lib.rs @@ -1890,13 +1890,15 @@ impl Pallet { BlockWeight::::kill(); } - /// Remove temporary "environment" entries in storage, compute the storage root and return the - /// resulting header for this block. 
- pub fn finalize() -> HeaderFor { + /// Log the entire resource usage report up until this point. + /// + /// Uses `crate::LOG_TARGET`, level `debug` and prints the weight and block length usage. + pub fn resource_usage_report() { log::debug!( target: LOG_TARGET, "[{:?}] {} extrinsics, length: {} (normal {}%, op: {}%, mandatory {}%) / normal weight:\ - {} ({}%) op weight {} ({}%) / mandatory weight {} ({}%)", + {} (ref_time: {}%, proof_size: {}%) op weight {} (ref_time {}%, proof_size {}%) / \ + mandatory weight {} (ref_time: {}%, proof_size: {}%)", Self::block_number(), Self::extrinsic_count(), Self::all_extrinsics_len(), @@ -1917,17 +1919,35 @@ impl Pallet { Self::block_weight().get(DispatchClass::Normal).ref_time(), T::BlockWeights::get().get(DispatchClass::Normal).max_total.unwrap_or(Bounded::max_value()).ref_time() ).deconstruct(), + sp_runtime::Percent::from_rational( + Self::block_weight().get(DispatchClass::Normal).proof_size(), + T::BlockWeights::get().get(DispatchClass::Normal).max_total.unwrap_or(Bounded::max_value()).proof_size() + ).deconstruct(), Self::block_weight().get(DispatchClass::Operational), sp_runtime::Percent::from_rational( Self::block_weight().get(DispatchClass::Operational).ref_time(), T::BlockWeights::get().get(DispatchClass::Operational).max_total.unwrap_or(Bounded::max_value()).ref_time() ).deconstruct(), + sp_runtime::Percent::from_rational( + Self::block_weight().get(DispatchClass::Operational).proof_size(), + T::BlockWeights::get().get(DispatchClass::Operational).max_total.unwrap_or(Bounded::max_value()).proof_size() + ).deconstruct(), Self::block_weight().get(DispatchClass::Mandatory), sp_runtime::Percent::from_rational( Self::block_weight().get(DispatchClass::Mandatory).ref_time(), T::BlockWeights::get().get(DispatchClass::Mandatory).max_total.unwrap_or(Bounded::max_value()).ref_time() ).deconstruct(), + sp_runtime::Percent::from_rational( + Self::block_weight().get(DispatchClass::Mandatory).proof_size(), + 
T::BlockWeights::get().get(DispatchClass::Mandatory).max_total.unwrap_or(Bounded::max_value()).proof_size() + ).deconstruct(), ); + } + + /// Remove temporary "environment" entries in storage, compute the storage root and return the + /// resulting header for this block. + pub fn finalize() -> HeaderFor { + Self::resource_usage_report(); ExecutionPhase::::kill(); AllExtrinsicsLen::::kill(); storage::unhashed::kill(well_known_keys::INTRABLOCK_ENTROPY); diff --git a/substrate/primitives/npos-elections/src/helpers.rs b/substrate/primitives/npos-elections/src/helpers.rs index 45455b42fb6ca..04f8a5648af85 100644 --- a/substrate/primitives/npos-elections/src/helpers.rs +++ b/substrate/primitives/npos-elections/src/helpers.rs @@ -53,7 +53,7 @@ where { let mut staked = assignment_ratio_to_staked(ratio, &stake_of); staked.iter_mut().try_for_each(|a| { - a.try_normalize(stake_of(&a.who).into()).map_err(Error::ArithmeticError) + a.try_normalize(stake_of(&a.who).into()).map_err(|_| Error::ArithmeticError) })?; Ok(staked) } @@ -73,7 +73,7 @@ pub fn assignment_staked_to_ratio_normalized( ) -> Result>, Error> { let mut ratio = staked.into_iter().map(|a| a.into_assignment()).collect::>(); for assignment in ratio.iter_mut() { - assignment.try_normalize().map_err(Error::ArithmeticError)?; + assignment.try_normalize().map_err(|_| Error::ArithmeticError)?; } Ok(ratio) } diff --git a/substrate/primitives/npos-elections/src/lib.rs b/substrate/primitives/npos-elections/src/lib.rs index f5a8ccb4351a4..01dab56237e62 100644 --- a/substrate/primitives/npos-elections/src/lib.rs +++ b/substrate/primitives/npos-elections/src/lib.rs @@ -83,7 +83,7 @@ use scale_info::TypeInfo; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use sp_arithmetic::{traits::Zero, Normalizable, PerThing, Rational128, ThresholdOrd}; -use sp_core::{bounded::BoundedVec, RuntimeDebug}; +use sp_core::RuntimeDebug; #[cfg(test)] mod mock; @@ -110,7 +110,16 @@ pub use reduce::reduce; pub use traits::{IdentifierT, 
PerThing128}; /// The errors that might occur in this crate and `frame-election-provider-solution-type`. -#[derive(Eq, PartialEq, RuntimeDebug)] +#[derive( + Eq, + PartialEq, + RuntimeDebug, + Clone, + codec::Encode, + codec::Decode, + codec::DecodeWithMemTracking, + scale_info::TypeInfo, +)] pub enum Error { /// While going from solution indices to ratio, the weight of all the edges has gone above the /// total. @@ -122,11 +131,13 @@ pub enum Error { /// One of the page indices was invalid. SolutionInvalidPageIndex, /// An error occurred in some arithmetic operation. - ArithmeticError(&'static str), + ArithmeticError, /// The data provided to create support map was invalid. InvalidSupportEdge, /// The number of voters is bigger than the `MaxVoters` bound. TooManyVoters, + /// Some bounds were exceeded when converting election types. + BoundsExceeded, } /// A type which is used in the API of this crate as a numeric weight of a vote, most often the @@ -456,6 +467,12 @@ impl Default for Support { } } +impl Backings for &Support { + fn total(&self) -> ExtendedBalance { + self.total + } +} + /// A target-major representation of the the election outcome. /// /// Essentially a flat variant of [`SupportMap`]. @@ -463,11 +480,6 @@ impl Default for Support { /// The main advantage of this is that it is encodable. pub type Supports = Vec<(A, Support)>; -/// Same as `Supports` but bounded by `B`. -/// -/// To note, the inner `Support` is still unbounded. -pub type BoundedSupports = BoundedVec<(A, Support), B>; - /// Linkage from a winner to their [`Support`]. /// /// This is more helpful than a normal [`Supports`] as it allows faster error checking. @@ -491,8 +503,7 @@ pub fn to_support_map( supports } -/// Same as [`to_support_map`] except it returns a -/// flat vector. +/// Same as [`to_support_map`] except it returns a flat vector. 
pub fn to_supports( assignments: &[StakedAssignment], ) -> Supports { @@ -511,23 +522,34 @@ pub trait EvaluateSupport { impl EvaluateSupport for Supports { fn evaluate(&self) -> ElectionScore { - let mut minimal_stake = ExtendedBalance::max_value(); - let mut sum_stake: ExtendedBalance = Zero::zero(); - // NOTE: The third element might saturate but fine for now since this will run on-chain and - // need to be fast. - let mut sum_stake_squared: ExtendedBalance = Zero::zero(); - - for (_, support) in self { - sum_stake = sum_stake.saturating_add(support.total); - let squared = support.total.saturating_mul(support.total); - sum_stake_squared = sum_stake_squared.saturating_add(squared); - if support.total < minimal_stake { - minimal_stake = support.total; - } - } + evaluate_support(self.iter().map(|(_, s)| s)) + } +} - ElectionScore { minimal_stake, sum_stake, sum_stake_squared } +/// Generic representation of a support. +pub trait Backings { + /// The total backing of an individual target. + fn total(&self) -> ExtendedBalance; +} + +/// General evaluation of a list of backings that returns an election score. +pub fn evaluate_support(backings: impl Iterator) -> ElectionScore { + let mut minimal_stake = ExtendedBalance::max_value(); + let mut sum_stake: ExtendedBalance = Zero::zero(); + // NOTE: The third element might saturate but fine for now since this will run on-chain and + // need to be fast. + let mut sum_stake_squared: ExtendedBalance = Zero::zero(); + + for support in backings { + sum_stake = sum_stake.saturating_add(support.total()); + let squared = support.total().saturating_mul(support.total()); + sum_stake_squared = sum_stake_squared.saturating_add(squared); + if support.total() < minimal_stake { + minimal_stake = support.total(); + } } + + ElectionScore { minimal_stake, sum_stake, sum_stake_squared } } /// Converts raw inputs to types used in this crate. 
diff --git a/substrate/primitives/npos-elections/src/phragmen.rs b/substrate/primitives/npos-elections/src/phragmen.rs index d37739b3bfb8c..aaee20f0ad79b 100644 --- a/substrate/primitives/npos-elections/src/phragmen.rs +++ b/substrate/primitives/npos-elections/src/phragmen.rs @@ -97,7 +97,7 @@ pub fn seq_phragmen( voters.into_iter().filter_map(|v| v.into_assignment()).collect::>(); assignments .iter_mut() - .try_for_each(|a| a.try_normalize().map_err(crate::Error::ArithmeticError))?; + .try_for_each(|a| a.try_normalize().map_err(|_| crate::Error::ArithmeticError))?; let winners = winners .into_iter() .map(|w_ptr| (w_ptr.borrow().who.clone(), w_ptr.borrow().backed_stake)) @@ -205,7 +205,7 @@ pub fn seq_phragmen_core( // edge of all candidates that eventually have a non-zero weight must be elected. debug_assert!(voter.edges.iter().all(|e| e.candidate.borrow().elected)); // inc budget to sum the budget. - voter.try_normalize_elected().map_err(crate::Error::ArithmeticError)?; + voter.try_normalize_elected().map_err(|_| crate::Error::ArithmeticError)?; } Ok((candidates, voters)) diff --git a/substrate/primitives/npos-elections/src/phragmms.rs b/substrate/primitives/npos-elections/src/phragmms.rs index f02aabb290c47..8d5ec4af4a597 100644 --- a/substrate/primitives/npos-elections/src/phragmms.rs +++ b/substrate/primitives/npos-elections/src/phragmms.rs @@ -71,7 +71,7 @@ pub fn phragmms( assignments .iter_mut() .try_for_each(|a| a.try_normalize()) - .map_err(crate::Error::ArithmeticError)?; + .map_err(|_| crate::Error::ArithmeticError)?; let winners = winners .into_iter() .map(|w_ptr| (w_ptr.borrow().who.clone(), w_ptr.borrow().backed_stake)) diff --git a/substrate/primitives/runtime/src/lib.rs b/substrate/primitives/runtime/src/lib.rs index b9f69b6daa10e..1fc7c5a297dd7 100644 --- a/substrate/primitives/runtime/src/lib.rs +++ b/substrate/primitives/runtime/src/lib.rs @@ -643,9 +643,7 @@ pub enum DispatchError { /// Result of a `Dispatchable` which contains the 
`DispatchResult` and additional information about /// the `Dispatchable` that is only known post dispatch. -#[derive( - Eq, PartialEq, Clone, Copy, Encode, Decode, DecodeWithMemTracking, RuntimeDebug, TypeInfo, -)] +#[derive(Eq, PartialEq, Clone, Copy, Encode, Decode, DecodeWithMemTracking, Debug, TypeInfo)] pub struct DispatchErrorWithPostInfo where Info: Eq + PartialEq + Clone + Copy + Encode + Decode + traits::Printable, diff --git a/substrate/primitives/staking/src/lib.rs b/substrate/primitives/staking/src/lib.rs index 024b93d1e0775..863e6cbe2b20f 100644 --- a/substrate/primitives/staking/src/lib.rs +++ b/substrate/primitives/staking/src/lib.rs @@ -25,7 +25,7 @@ extern crate alloc; use crate::currency_to_vote::CurrencyToVote; use alloc::{collections::btree_map::BTreeMap, vec, vec::Vec}; use codec::{Decode, DecodeWithMemTracking, Encode, FullCodec, HasCompact, MaxEncodedLen}; -use core::ops::Sub; +use core::ops::{Add, AddAssign, Sub, SubAssign}; use scale_info::TypeInfo; use sp_runtime::{ traits::{AtLeast32BitUnsigned, Zero}, @@ -402,7 +402,31 @@ impl< Balance: HasCompact + AtLeast32BitUnsigned + Copy + codec::MaxEncodedLen, > Exposure { - /// Splits an `Exposure` into `PagedExposureMetadata` and multiple chunks of + /// Splits self into two instances of exposures. + /// + /// `n_others` individual exposures are consumed from self and returned as part of the new + /// exposure. + /// + /// Since this method splits `others` of a single exposure, `total.own` will be the same for + /// both `self` and the returned exposure. 
+ pub fn split_others(&mut self, n_others: u32) -> Self { + let head_others: Vec<_> = + self.others.drain(..(n_others as usize).min(self.others.len())).collect(); + + let total_others_head: Balance = head_others + .iter() + .fold(Zero::zero(), |acc: Balance, o| acc.saturating_add(o.value)); + + self.total = self.total.saturating_sub(total_others_head); + + Self { + total: total_others_head.saturating_add(self.own), + own: self.own, + others: head_others, + } + } + + /// Converts an `Exposure` into `PagedExposureMetadata` and multiple chunks of /// `IndividualExposure` with each chunk having maximum of `page_size` elements. pub fn into_pages( self, @@ -423,7 +447,6 @@ impl< value: individual.value, }) } - exposure_pages.push(ExposurePage { page_total, others }); } @@ -455,6 +478,19 @@ impl Default for ExposurePage { } } +/// Returns an exposure page from a set of individual exposures. +impl From>> + for ExposurePage +{ + fn from(exposures: Vec>) -> Self { + exposures.into_iter().fold(ExposurePage::default(), |mut page, e| { + page.page_total += e.value.clone(); + page.others.push(e); + page + }) + } +} + /// Metadata for Paged Exposure of a validator such as total stake across pages and page count. /// /// In combination with the associated `ExposurePage`s, it can be used to reconstruct a full @@ -472,6 +508,7 @@ impl Default for ExposurePage { TypeInfo, Default, MaxEncodedLen, + Copy, )] pub struct PagedExposureMetadata { /// The total balance backing this validator. @@ -486,6 +523,42 @@ pub struct PagedExposureMetadata { pub page_count: Page, } +impl PagedExposureMetadata +where + Balance: HasCompact + + codec::MaxEncodedLen + + Add + + Sub + + sp_runtime::Saturating + + PartialEq + + Copy + + sp_runtime::traits::Debug, +{ + /// Consumes self and returns the result of the metadata updated with `other_balances` and + /// of adding `other_num` nominators to the metadata. + /// + /// `Max` is a getter of the maximum number of nominators per page. 
+ pub fn update_with>( + self, + others_balance: Balance, + others_num: u32, + ) -> Self { + let page_limit = Max::get().max(1); + let new_nominator_count = self.nominator_count.saturating_add(others_num); + let new_page_count = new_nominator_count + .saturating_add(page_limit) + .saturating_sub(1) + .saturating_div(page_limit); + + Self { + total: self.total.saturating_add(others_balance), + own: self.own, + nominator_count: new_nominator_count, + page_count: new_page_count, + } + } +} + /// A type that belongs only in the context of an `Agent`. /// /// `Agent` is someone that manages delegated funds from [`Delegator`] accounts. It can @@ -646,3 +719,114 @@ pub trait DelegationMigrator { } sp_core::generate_feature_enabled_macro!(runtime_benchmarks_enabled, feature = "runtime-benchmarks", $); + +#[cfg(test)] +mod tests { + use sp_core::ConstU32; + + use super::*; + + #[test] + fn update_with_works() { + let metadata = PagedExposureMetadata:: { + total: 1000, + own: 0, // don't care + nominator_count: 10, + page_count: 1, + }; + + assert_eq!( + metadata.update_with::>(1, 1), + PagedExposureMetadata { total: 1001, own: 0, nominator_count: 11, page_count: 2 }, + ); + + assert_eq!( + metadata.update_with::>(1, 1), + PagedExposureMetadata { total: 1001, own: 0, nominator_count: 11, page_count: 3 }, + ); + + assert_eq!( + metadata.update_with::>(1, 1), + PagedExposureMetadata { total: 1001, own: 0, nominator_count: 11, page_count: 3 }, + ); + + assert_eq!( + metadata.update_with::>(1, 1), + PagedExposureMetadata { total: 1001, own: 0, nominator_count: 11, page_count: 11 }, + ); + } + + #[test] + fn individual_exposures_to_exposure_works() { + let exposure_1 = IndividualExposure { who: 1, value: 10u32 }; + let exposure_2 = IndividualExposure { who: 2, value: 20 }; + let exposure_3 = IndividualExposure { who: 3, value: 30 }; + + let exposure_page: ExposurePage = vec![exposure_1, exposure_2, exposure_3].into(); + + assert_eq!( + exposure_page, + ExposurePage { page_total: 
60, others: vec![exposure_1, exposure_2, exposure_3] }, ); } + + #[test] + fn empty_individual_exposures_to_exposure_works() { + let empty_exposures: Vec> = vec![]; + + let exposure_page: ExposurePage = empty_exposures.into(); + assert_eq!(exposure_page, ExposurePage { page_total: 0, others: vec![] }); + } + + #[test] + fn exposure_split_others_works() { + let exposure = Exposure { + total: 100, + own: 20, + others: vec![ + IndividualExposure { who: 1, value: 20u32 }, + IndividualExposure { who: 2, value: 20 }, + IndividualExposure { who: 3, value: 20 }, + IndividualExposure { who: 4, value: 20 }, + ], + }; + + let mut exposure_0 = exposure.clone(); + // split others with 0 `n_others` is a noop and returns an empty exposure (with `own` + // only). + let split_exposure = exposure_0.split_others(0); + assert_eq!(exposure_0, exposure); + assert_eq!(split_exposure, Exposure { total: 20, own: 20, others: vec![] }); + + let mut exposure_1 = exposure.clone(); + // split individual exposures so that the returned exposure has 1 individual exposure. + let split_exposure = exposure_1.split_others(1); + assert_eq!(exposure_1.own, 20); + assert_eq!(exposure_1.total, 20 + 3 * 20); + assert_eq!(exposure_1.others.len(), 3); + + assert_eq!(split_exposure.own, 20); + assert_eq!(split_exposure.total, 20 + 1 * 20); + assert_eq!(split_exposure.others.len(), 1); + + let mut exposure_3 = exposure.clone(); + // split individual exposures so that the returned exposure has 3 individual exposures, + // which are consumed from the original exposure.
+ let split_exposure = exposure_3.split_others(3); + assert_eq!(exposure_3.own, 20); + assert_eq!(exposure_3.total, 20 + 1 * 20); + assert_eq!(exposure_3.others.len(), 1); + + assert_eq!(split_exposure.own, 20); + assert_eq!(split_exposure.total, 20 + 3 * 20); + assert_eq!(split_exposure.others.len(), 3); + + let mut exposure_max = exposure.clone(); + // split others with more `n_others` than the number of others in the exposure + // consumes all the individual exposures of the original Exposure and returns them in the + // new exposure. + let split_exposure = exposure_max.split_others(u32::MAX); + assert_eq!(split_exposure, exposure); + assert_eq!(exposure_max, Exposure { total: 20, own: 20, others: vec![] }); + } +} diff --git a/substrate/primitives/staking/src/offence.rs b/substrate/primitives/staking/src/offence.rs index 0d2aa66f9bb39..973f5616d18e7 100644 --- a/substrate/primitives/staking/src/offence.rs +++ b/substrate/primitives/staking/src/offence.rs @@ -19,7 +19,7 @@ //! that use staking. use alloc::vec::Vec; -use codec::{Decode, Encode}; +use codec::{Decode, Encode, MaxEncodedLen}; use sp_core::Get; use sp_runtime::{transaction_validity::TransactionValidityError, DispatchError, Perbill}; @@ -252,7 +252,15 @@ impl OffenceReportSystem for () { /// For instance used for the purposes of distinguishing who should be /// prioritized for disablement.
#[derive( - Clone, Copy, PartialEq, Eq, Encode, Decode, sp_runtime::RuntimeDebug, scale_info::TypeInfo, + Clone, + Copy, + PartialEq, + Eq, + Encode, + Decode, + MaxEncodedLen, + sp_runtime::RuntimeDebug, + scale_info::TypeInfo, )] pub struct OffenceSeverity(pub Perbill); diff --git a/umbrella/Cargo.toml b/umbrella/Cargo.toml index 216ca84ecaddf..9b8ae8ee8e7d8 100644 --- a/umbrella/Cargo.toml +++ b/umbrella/Cargo.toml @@ -87,6 +87,7 @@ std = [ "pallet-delegated-staking?/std", "pallet-democracy?/std", "pallet-dev-mode?/std", + "pallet-election-provider-multi-block?/std", "pallet-election-provider-multi-phase?/std", "pallet-election-provider-support-benchmarking?/std", "pallet-elections-phragmen?/std", @@ -135,6 +136,11 @@ std = [ "pallet-session?/std", "pallet-skip-feeless-payment?/std", "pallet-society?/std", + "pallet-staking-async-ah-client?/std", + "pallet-staking-async-rc-client?/std", + "pallet-staking-async-reward-fn?/std", + "pallet-staking-async-runtime-api?/std", + "pallet-staking-async?/std", "pallet-staking-reward-fn?/std", "pallet-staking-runtime-api?/std", "pallet-staking?/std", @@ -268,6 +274,7 @@ runtime-benchmarks = [ "pallet-core-fellowship?/runtime-benchmarks", "pallet-delegated-staking?/runtime-benchmarks", "pallet-democracy?/runtime-benchmarks", + "pallet-election-provider-multi-block?/runtime-benchmarks", "pallet-election-provider-multi-phase?/runtime-benchmarks", "pallet-election-provider-support-benchmarking?/runtime-benchmarks", "pallet-elections-phragmen?/runtime-benchmarks", @@ -308,6 +315,9 @@ runtime-benchmarks = [ "pallet-session-benchmarking?/runtime-benchmarks", "pallet-skip-feeless-payment?/runtime-benchmarks", "pallet-society?/runtime-benchmarks", + "pallet-staking-async-ah-client?/runtime-benchmarks", + "pallet-staking-async-rc-client?/runtime-benchmarks", + "pallet-staking-async?/runtime-benchmarks", "pallet-staking?/runtime-benchmarks", "pallet-state-trie-migration?/runtime-benchmarks", "pallet-sudo?/runtime-benchmarks", @@ 
-396,6 +406,7 @@ try-runtime = [ "pallet-delegated-staking?/try-runtime", "pallet-democracy?/try-runtime", "pallet-dev-mode?/try-runtime", + "pallet-election-provider-multi-block?/try-runtime", "pallet-election-provider-multi-phase?/try-runtime", "pallet-elections-phragmen?/try-runtime", "pallet-fast-unstake?/try-runtime", @@ -438,6 +449,9 @@ try-runtime = [ "pallet-session?/try-runtime", "pallet-skip-feeless-payment?/try-runtime", "pallet-society?/try-runtime", + "pallet-staking-async-ah-client?/try-runtime", + "pallet-staking-async-rc-client?/try-runtime", + "pallet-staking-async?/try-runtime", "pallet-staking?/try-runtime", "pallet-state-trie-migration?/try-runtime", "pallet-statement?/try-runtime", @@ -599,6 +613,7 @@ runtime-full = [ "pallet-delegated-staking", "pallet-democracy", "pallet-dev-mode", + "pallet-election-provider-multi-block", "pallet-election-provider-multi-phase", "pallet-election-provider-support-benchmarking", "pallet-elections-phragmen", @@ -649,6 +664,11 @@ runtime-full = [ "pallet-skip-feeless-payment", "pallet-society", "pallet-staking", + "pallet-staking-async", + "pallet-staking-async-ah-client", + "pallet-staking-async-rc-client", + "pallet-staking-async-reward-fn", + "pallet-staking-async-runtime-api", "pallet-staking-reward-curve", "pallet-staking-reward-fn", "pallet-staking-runtime-api", @@ -1367,6 +1387,11 @@ default-features = false optional = true path = "../substrate/frame/examples/dev-mode" +[dependencies.pallet-election-provider-multi-block] +default-features = false +optional = true +path = "../substrate/frame/election-provider-multi-block" + [dependencies.pallet-election-provider-multi-phase] default-features = false optional = true @@ -1617,6 +1642,31 @@ default-features = false optional = true path = "../substrate/frame/staking" +[dependencies.pallet-staking-async] +default-features = false +optional = true +path = "../substrate/frame/staking-async" + +[dependencies.pallet-staking-async-ah-client] +default-features = false 
+optional = true +path = "../substrate/frame/staking-async/ah-client" + +[dependencies.pallet-staking-async-rc-client] +default-features = false +optional = true +path = "../substrate/frame/staking-async/rc-client" + +[dependencies.pallet-staking-async-reward-fn] +default-features = false +optional = true +path = "../substrate/frame/staking-async/reward-fn" + +[dependencies.pallet-staking-async-runtime-api] +default-features = false +optional = true +path = "../substrate/frame/staking-async/runtime-api" + [dependencies.pallet-staking-reward-curve] default-features = false optional = true path = "../substrate/frame/staking/reward-curve" diff --git a/umbrella/src/lib.rs b/umbrella/src/lib.rs index 667170ad1dc1f..2f8a92a583d19 100644 --- a/umbrella/src/lib.rs +++ b/umbrella/src/lib.rs @@ -443,6 +443,10 @@ pub use pallet_democracy; #[cfg(feature = "pallet-dev-mode")] pub use pallet_dev_mode; +/// PALLET multi phase+block election providers. +#[cfg(feature = "pallet-election-provider-multi-block")] +pub use pallet_election_provider_multi_block; + /// PALLET two phase election providers. #[cfg(feature = "pallet-election-provider-multi-phase")] pub use pallet_election_provider_multi_phase; @@ -653,6 +657,28 @@ pub use pallet_society; #[cfg(feature = "pallet-staking")] pub use pallet_staking; +/// FRAME pallet staking async. +#[cfg(feature = "pallet-staking-async")] +pub use pallet_staking_async; + +/// Pallet handling the communication with staking-rc-client. Its role is to glue the staking +/// pallet (on AssetHub chain) and session pallet (on Relay Chain) in a transparent way. +#[cfg(feature = "pallet-staking-async-ah-client")] +pub use pallet_staking_async_ah_client; + +/// Pallet handling the communication with staking-ah-client. Its role is to glue the staking +/// pallet (on AssetHub chain) and session pallet (on Relay Chain) in a transparent way. +#[cfg(feature = "pallet-staking-async-rc-client")] +pub use pallet_staking_async_rc_client; + +/// Reward function for FRAME staking pallet.
+#[cfg(feature = "pallet-staking-async-reward-fn")] +pub use pallet_staking_async_reward_fn; + +/// Runtime API for the FRAME staking-async pallet. +#[cfg(feature = "pallet-staking-async-runtime-api")] +pub use pallet_staking_async_runtime_api; + /// Reward Curve for FRAME staking pallet. #[cfg(feature = "pallet-staking-reward-curve")] pub use pallet_staking_reward_curve;