From 62184b4266f6bf73fdb21008b5c8e8eb494291ca Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 27 Apr 2022 21:30:28 -0500 Subject: [PATCH 01/54] BEGIN ASYNC candidate-backing CHANGES --- node/core/backing/src/lib.rs | 148 +++++++++++++++++++++++++++++++---- 1 file changed, 132 insertions(+), 16 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index c6c1a76cee61..9d296bd03d34 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -19,7 +19,7 @@ #![deny(unused_crate_dependencies)] use std::{ - collections::{HashMap, HashSet}, + collections::{hash_map::Entry, HashMap, HashSet}, sync::Arc, }; @@ -147,6 +147,13 @@ where } } +// The mode is determined on a per-relay-parent basis, based +// on the runtime API version. +enum Mode { + ProspectiveParachains, + NoProspectiveParachains, +} + #[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] async fn run( mut ctx: Context, @@ -154,14 +161,14 @@ async fn run( metrics: Metrics, ) -> FatalResult<()> { let (background_validation_tx, mut background_validation_rx) = mpsc::channel(16); - let mut jobs = HashMap::new(); + let mut view = View::new(); loop { let res = run_iteration( &mut ctx, keystore.clone(), &metrics, - &mut jobs, + &mut view, background_validation_tx.clone(), &mut background_validation_rx, ) @@ -181,7 +188,7 @@ async fn run_iteration( ctx: &mut Context, keystore: SyncCryptoStorePtr, metrics: &Metrics, - jobs: &mut HashMap>, + view: &mut View, background_validation_tx: mpsc::Sender<(Hash, ValidatedCandidateCommand)>, background_validation_rx: &mut mpsc::Receiver<(Hash, ValidatedCandidateCommand)>, ) -> Result<(), Error> { @@ -191,7 +198,7 @@ async fn run_iteration( if let Some((relay_parent, command)) = validated_command { handle_validated_candidate_command( &mut *ctx, - jobs, + view, relay_parent, command, ).await?; @@ -204,14 +211,14 @@ async fn run_iteration( FromOverseer::Signal(OverseerSignal::ActiveLeaves(update)) => 
handle_active_leaves_update( &mut *ctx, update, - jobs, + view, &keystore, &background_validation_tx, &metrics, ).await?, FromOverseer::Signal(OverseerSignal::BlockFinalized(..)) => {} FromOverseer::Signal(OverseerSignal::Conclude) => return Ok(()), - FromOverseer::Communication { msg } => handle_communication(&mut *ctx, jobs, msg).await?, + FromOverseer::Communication { msg } => handle_communication(&mut *ctx, view, msg).await?, } } ) @@ -221,11 +228,11 @@ async fn run_iteration( #[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] async fn handle_validated_candidate_command( ctx: &mut Context, - jobs: &mut HashMap>, + view: &mut View, relay_parent: Hash, command: ValidatedCandidateCommand, ) -> Result<(), Error> { - if let Some(job) = jobs.get_mut(&relay_parent) { + if let Some(job) = view.job_mut(&relay_parent) { job.job.handle_validated_candidate_command(&job.span, ctx, command).await?; } else { // simple race condition; can be ignored - this relay-parent @@ -238,22 +245,22 @@ async fn handle_validated_candidate_command( #[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] async fn handle_communication( ctx: &mut Context, - jobs: &mut HashMap>, + view: &mut View, message: CandidateBackingMessage, ) -> Result<(), Error> { match message { CandidateBackingMessage::Second(relay_parent, candidate, pov) => { - if let Some(job) = jobs.get_mut(&relay_parent) { + if let Some(job) = view.job_mut(&relay_parent) { job.job.handle_second_msg(&job.span, ctx, candidate, pov).await?; } }, CandidateBackingMessage::Statement(relay_parent, statement) => { - if let Some(job) = jobs.get_mut(&relay_parent) { + if let Some(job) = view.job_mut(&relay_parent) { job.job.handle_statement_message(&job.span, ctx, statement).await?; } }, CandidateBackingMessage::GetBackedCandidates(relay_parent, requested_candidates, tx) => - if let Some(job) = jobs.get_mut(&relay_parent) { + if let Some(job) = view.job_mut(&relay_parent) { 
job.job.handle_get_backed_candidates_message(requested_candidates, tx)?; }, } @@ -265,7 +272,7 @@ async fn handle_communication( async fn handle_active_leaves_update( ctx: &mut Context, update: ActiveLeavesUpdate, - jobs: &mut HashMap>, + view: &mut View, keystore: &SyncCryptoStorePtr, background_validation_tx: &mpsc::Sender<(Hash, ValidatedCandidateCommand)>, metrics: &Metrics, @@ -279,6 +286,9 @@ async fn handle_active_leaves_update( Some(a) => a, }; + // TODO [now]: update view. no ancestry if mode is not + // `ProspectiveParachains`. + macro_rules! try_runtime_api { ($x: expr) => { match $x { @@ -378,7 +388,8 @@ async fn handle_active_leaves_update( drop(assignments_span); let _span = span.child("wait-for-job"); - let job = CandidateBackingJob { + // TODO [now] bound unneeded + let job = CandidateBackingJob:: { parent, session_index, assignment, @@ -397,7 +408,7 @@ async fn handle_active_leaves_update( _marker: std::marker::PhantomData, }; - jobs.insert(parent, JobAndSpan { job, span }); + // TODO [now] view.insert(parent, JobAndSpan { job, span }); Ok(()) } @@ -407,6 +418,111 @@ struct JobAndSpan { span: PerLeafSpan, } +struct ViewEntry { + ref_count: usize, + job: Option>, +} + +#[derive(Debug, PartialEq)] +enum JobStatus { + Unneeded, + Needed, + Existing, +} + +struct View { + // Maps active-leaves to relevant ancestry, according to the + // prospective-parachains subsystem. + active_leaves: HashMap>, + + // maps relay-parents to jobs and spans. + implicit_view: HashMap>, +} + +impl View { + fn new() -> Self { + View { + active_leaves: HashMap::new(), + implicit_view: HashMap::new(), + } + } + + /// Add a leaf to the view, with the given implicit ancestry. 
+ /// + /// Jobs may not already exist for the implicit ancestry, so + fn add_leaf_with_implicit_ancestry( + &mut self, + leaf: Hash, + implicit_ancestry: Vec, + ) { + let ancestry = match self.active_leaves.entry(leaf) { + Entry::Vacant(mut vacant) => vacant.insert(implicit_ancestry), + Entry::Occupied(_) => { + gum::debug!( + target: LOG_TARGET, + relay_parent = ?leaf, + "Attempted to add leaf to view more than once.", + ); + + return + } + }; + + for fresh in ancestry.iter().cloned().chain(std::iter::once(leaf)) { + self.implicit_view.entry(fresh).or_insert_with(|| ViewEntry { + ref_count: 0, + job: None, + }).ref_count += 1; + } + } + + /// Given a deactivated leaf, this does book-keeping on deactivated leaves + fn prune(&mut self, deactivated: Hash) { + if let Some(ancestry) = self.active_leaves.remove(&deactivated) { + for outdated in ancestry.into_iter().chain(std::iter::once(deactivated)) { + if let Entry::Occupied(mut entry) = self.implicit_view.entry(outdated) { + entry.get_mut().ref_count = entry.get().ref_count.saturating_sub(1); + if entry.get().ref_count == 0 { + let _ = entry.remove(); + } + } + } + } + } + + fn supply_needed_job(&mut self, relay_parent: Hash, job: JobAndSpan) { + if self.job_status(&relay_parent) != JobStatus::Needed { + gum::debug!( + target: LOG_TARGET, + ?relay_parent, + "Attempted to supply unneeded job to view." + ); + return; + } + + // sanity: is always Some; guarded by job_status check above. + if let Some(x) = self.implicit_view.get_mut(&relay_parent) { + x.job = Some(job); + } + } + + /// The status of the job for a given relay-parent. 
+ fn job_status(&self, relay_parent: &Hash) -> JobStatus { + match self.implicit_view.get(relay_parent) { + None => JobStatus::Unneeded, + Some(entry) => if entry.job.is_some() { + JobStatus::Existing + } else { + JobStatus::Needed + }, + } + } + + fn job_mut<'a>(&'a mut self, relay_parent: &Hash) -> Option<&'a mut JobAndSpan> { + self.implicit_view.get_mut(relay_parent).and_then(|x| x.job.as_mut()) + } +} + /// Holds all data needed for candidate backing job operation. struct CandidateBackingJob { /// The hash of the relay parent on top of which this job is doing it's work. From 821ed42792ee433f6a779b4856e4039b4955e9e5 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 28 Apr 2022 20:26:39 -0500 Subject: [PATCH 02/54] rename & document modes --- node/core/backing/src/lib.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 9d296bd03d34..0419f91b46be 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -150,8 +150,13 @@ where // The mode is determined on a per-relay-parent basis, based // on the runtime API version. enum Mode { + // This mode makes use of the prospective parachains subsystem, + // to participate in asynchronous backing. ProspectiveParachains, - NoProspectiveParachains, + // This mode considers the 'base' block of the relay-chain only. + // This is a compatibility mode for the pre-asynchronous-backing + // era. 
+ BaseOnly, } #[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] From 1fc4928d9d2aad979d7b27a0aae5758a3eb8bce1 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 18 May 2022 15:02:33 -0500 Subject: [PATCH 03/54] answer prospective validation data requests --- node/core/backing/src/lib.rs | 34 +++++------ .../src/fragment_tree.rs | 21 ++++++- node/core/prospective-parachains/src/lib.rs | 61 ++++++++++++++++++- node/subsystem-types/src/messages.rs | 22 +++++++ 4 files changed, 115 insertions(+), 23 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 0419f91b46be..0b712b58d8dc 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -446,20 +446,13 @@ struct View { impl View { fn new() -> Self { - View { - active_leaves: HashMap::new(), - implicit_view: HashMap::new(), - } + View { active_leaves: HashMap::new(), implicit_view: HashMap::new() } } /// Add a leaf to the view, with the given implicit ancestry. /// /// Jobs may not already exist for the implicit ancestry, so - fn add_leaf_with_implicit_ancestry( - &mut self, - leaf: Hash, - implicit_ancestry: Vec, - ) { + fn add_leaf_with_implicit_ancestry(&mut self, leaf: Hash, implicit_ancestry: Vec) { let ancestry = match self.active_leaves.entry(leaf) { Entry::Vacant(mut vacant) => vacant.insert(implicit_ancestry), Entry::Occupied(_) => { @@ -470,14 +463,14 @@ impl View { ); return - } + }, }; for fresh in ancestry.iter().cloned().chain(std::iter::once(leaf)) { - self.implicit_view.entry(fresh).or_insert_with(|| ViewEntry { - ref_count: 0, - job: None, - }).ref_count += 1; + self.implicit_view + .entry(fresh) + .or_insert_with(|| ViewEntry { ref_count: 0, job: None }) + .ref_count += 1; } } @@ -502,7 +495,7 @@ impl View { ?relay_parent, "Attempted to supply unneeded job to view." ); - return; + return } // sanity: is always Some; guarded by job_status check above. 
@@ -515,11 +508,12 @@ impl View { fn job_status(&self, relay_parent: &Hash) -> JobStatus { match self.implicit_view.get(relay_parent) { None => JobStatus::Unneeded, - Some(entry) => if entry.job.is_some() { - JobStatus::Existing - } else { - JobStatus::Needed - }, + Some(entry) => + if entry.job.is_some() { + JobStatus::Existing + } else { + JobStatus::Needed + }, } } diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index 9972b60490a1..ab9d678f77b0 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -62,7 +62,7 @@ use polkadot_node_subsystem_util::inclusion_emulator::staging::{ ConstraintModifications, Constraints, Fragment, ProspectiveCandidate, RelayChainBlockInfo, }; use polkadot_primitives::vstaging::{ - BlockNumber, CandidateHash, CommittedCandidateReceipt, Hash, Id as ParaId, + BlockNumber, CandidateHash, CommittedCandidateReceipt, Hash, HeadData, Id as ParaId, PersistedValidationData, }; @@ -158,6 +158,17 @@ impl CandidateStorage { }) } + /// Get head-data by hash. + pub(crate) fn head_data_by_hash(&self, hash: &Hash) -> Option<&HeadData> { + // Get some candidate which has a parent-head with the same hash as requested. + let a_candidate_hash = self.by_parent_head.get(hash).and_then(|m| m.iter().next())?; + + // Extract the full parent head from that candidate's `PersistedValidationData`. + self.by_candidate_hash + .get(a_candidate_hash) + .map(|e| &e.candidate.persisted_validation_data.parent_head) + } + fn iter_para_children<'a>( &'a self, parent_head_hash: &Hash, @@ -271,13 +282,19 @@ impl Scope { .unwrap_or_else(|| self.relay_parent.clone()) } - fn ancestor_by_hash(&self, hash: &Hash) -> Option { + /// Get the ancestor of the fragment tree by hash. 
+ pub fn ancestor_by_hash(&self, hash: &Hash) -> Option { if hash == &self.relay_parent.hash { return Some(self.relay_parent.clone()) } self.ancestors_by_hash.get(hash).map(|info| info.clone()) } + + /// Get the base constraints of the scope + pub fn base_constraints(&self) -> &Constraints { + &self.base_constraints + } } // We use indices into a flat vector to refer to nodes in the tree. diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index 0e447aa69b1f..cb4f0e15ece5 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -34,7 +34,8 @@ use futures::{channel::oneshot, prelude::*}; use polkadot_node_subsystem::{ messages::{ ChainApiMessage, FragmentTreeMembership, HypotheticalDepthRequest, - ProspectiveParachainsMessage, RuntimeApiMessage, RuntimeApiRequest, + ProspectiveParachainsMessage, ProspectiveValidationDataRequest, RuntimeApiMessage, + RuntimeApiRequest, }, overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemError, }; @@ -139,6 +140,8 @@ async fn run_iteration(ctx: &mut Context, view: &mut View) -> Result<() answer_tree_membership_request(&view, para, candidate, tx), ProspectiveParachainsMessage::GetMinimumRelayParent(para, relay_parent, tx) => answer_minimum_relay_parent_request(&view, para, relay_parent, tx), + ProspectiveParachainsMessage::GetProspectiveValidationData(request, tx) => + answer_prospective_validation_data_request(&view, request, tx), }, } } @@ -476,6 +479,62 @@ fn answer_minimum_relay_parent_request( let _ = tx.send(res); } +fn answer_prospective_validation_data_request( + view: &View, + request: ProspectiveValidationDataRequest, + tx: oneshot::Sender>, +) { + // 1. Try to get the head-data from the candidate store if known. + // 2. Otherwise, it might exist as the base in some relay-parent and we can find it by + // iterating fragment trees. + // 3. Otherwise, it is unknown. + // 4. 
Also try to find the relay parent block info by scanning + // fragment trees. + // 5. If head data and relay parent block info are found - success. Otherwise, failure. + + let storage = match view.candidate_storage.get(&request.para_id) { + None => { + let _ = tx.send(None); + return + }, + Some(s) => s, + }; + + let mut head_data = + storage.head_data_by_hash(&request.parent_head_data_hash).map(|x| x.clone()); + let mut relay_parent_info = None; + + for fragment_tree in view + .active_leaves + .values() + .filter_map(|x| x.fragment_trees.get(&request.para_id)) + { + if head_data.is_some() && relay_parent_info.is_some() { + break + } + if relay_parent_info.is_none() { + relay_parent_info = + fragment_tree.scope().ancestor_by_hash(&request.candidate_relay_parent); + } + if head_data.is_none() { + let required_parent = &fragment_tree.scope().base_constraints().required_parent; + if required_parent.hash() == request.parent_head_data_hash { + head_data = Some(required_parent.clone()); + } + } + } + + let _ = tx.send(match (head_data, relay_parent_info) { + (Some(h), Some(i)) => Some(PersistedValidationData { + parent_head: h, + relay_parent_number: i.number, + relay_parent_storage_root: i.storage_root, + max_pov_size: request.max_pov_size, + }), + _ => None, + }); +} + #[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] async fn fetch_base_constraints( ctx: &mut Context, diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index db2bd89286b7..67abb2451e56 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -956,6 +956,21 @@ pub struct HypotheticalDepthRequest { pub fragment_tree_relay_parent: Hash, } +/// A request for the persisted validation data stored in the prospective +/// parachains subsystem. +#[derive(Debug)] +pub struct ProspectiveValidationDataRequest { + /// The para-id of the candidate. + pub para_id: ParaId, + /// The relay-parent of the candidate. 
+ pub candidate_relay_parent: Hash, + /// The parent head-data hash. + pub parent_head_data_hash: Hash, + /// The maximum POV size expected of this candidate. This should be + /// the maximum as configured during the session. + pub max_pov_size: u32, +} + /// Indicates the relay-parents whose fragment tree a candidate /// is present in and the depths of that tree the candidate is present in. pub type FragmentTreeMembership = Vec<(Hash, Vec)>; @@ -999,4 +1014,11 @@ pub enum ProspectiveParachainsMessage { /// in this para-id, this returns the minimum relay-parent block number in the /// same chain which is accepted in the fragment tree for the para-id. GetMinimumRelayParent(ParaId, Hash, oneshot::Sender>), + /// Get the validation data of some prospective candidate. The candidate doesn't need + /// to be part of any fragment tree, but this only succeeds if the parent head-data and + /// relay-parent are part of some fragment tree. + GetProspectiveValidationData( + ProspectiveValidationDataRequest, + oneshot::Sender>, + ), } From 39f207642ff48a50b49bca68aebf311bcf94eb9f Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 18 May 2022 21:25:01 -0500 Subject: [PATCH 04/54] GetMinimumRelayParents request is now plural --- node/core/prospective-parachains/src/lib.rs | 22 ++++++++++----------- node/subsystem-types/src/messages.rs | 19 ++++++++++++------ 2 files changed, 24 insertions(+), 17 deletions(-) diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index cb4f0e15ece5..1ddc84b230b0 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -138,8 +138,8 @@ async fn run_iteration(ctx: &mut Context, view: &mut View) -> Result<() answer_hypothetical_depths_request(&view, request, tx), ProspectiveParachainsMessage::GetTreeMembership(para, candidate, tx) => answer_tree_membership_request(&view, para, candidate, tx), - 
ProspectiveParachainsMessage::GetMinimumRelayParent(para, relay_parent, tx) => - answer_minimum_relay_parent_request(&view, para, relay_parent, tx), + ProspectiveParachainsMessage::GetMinimumRelayParents(relay_parent, tx) => + answer_minimum_relay_parents_request(&view, relay_parent, tx), ProspectiveParachainsMessage::GetProspectiveValidationData(request, tx) => answer_prospective_validation_data_request(&view, request, tx), }, @@ -464,19 +464,19 @@ fn answer_tree_membership_request( let _ = tx.send(membership); } -fn answer_minimum_relay_parent_request( +fn answer_minimum_relay_parents_request( view: &View, - para: ParaId, relay_parent: Hash, - tx: oneshot::Sender>, + tx: oneshot::Sender>, ) { - let res = view - .active_leaves - .get(&relay_parent) - .and_then(|data| data.fragment_trees.get(¶)) - .map(|tree| tree.scope().earliest_relay_parent().number); + let mut v = Vec::new(); + if let Some(leaf_data) = view.active_leaves.get(&relay_parent) { + for (para_id, fragment_tree) in &leaf_data.fragment_trees { + v.push((*para_id, fragment_tree.scope().earliest_relay_parent().number)); + } + } - let _ = tx.send(res); + let _ = tx.send(v); } fn answer_prospective_validation_data_request( diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index 67abb2451e56..1c5e2e3381e5 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -1007,13 +1007,20 @@ pub enum ProspectiveParachainsMessage { GetHypotheticalDepth(HypotheticalDepthRequest, oneshot::Sender>), /// Get the membership of the candidate in all fragment trees. GetTreeMembership(ParaId, CandidateHash, oneshot::Sender), - /// Get the minimum accepted relay-parent number in the fragment tree - /// for the given relay-parent and para-id. + /// Get the minimum accepted relay-parent number for each para in the fragment tree + /// for the given relay-chain block hash. 
/// - /// That is, if the relay-parent is known and there's a fragment tree for it, - /// in this para-id, this returns the minimum relay-parent block number in the - /// same chain which is accepted in the fragment tree for the para-id. - GetMinimumRelayParent(ParaId, Hash, oneshot::Sender>), + /// That is, if the block hash is known and is an active leaf, this returns the + /// minimum relay-parent block number in the same branch of the relay chain which + /// is accepted in the fragment tree for each para-id. + /// + /// If the block hash is not an active leaf, this will return an empty vector. + /// + /// Para-IDs which are omitted from this list can be assumed to have no + /// valid candidate relay-parents under the given relay-chain block hash. + /// + /// Para-IDs are returned in no particular order. + GetMinimumRelayParents(Hash, oneshot::Sender>), /// Get the validation data of some prospective candidate. The candidate doesn't need /// to be part of any fragment tree, but this only succeeds if the parent head-data and /// relay-parent are part of some fragment tree. From 80af4bedca7da44171383226bed7fd8842d3bfc5 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 19 May 2022 13:26:53 -0500 Subject: [PATCH 05/54] implement an implicit view utility for backing subsystems --- .../src/backing_implicit_view.rs | 209 ++++++++++++++++++ node/subsystem-util/src/lib.rs | 4 + 2 files changed, 213 insertions(+) create mode 100644 node/subsystem-util/src/backing_implicit_view.rs diff --git a/node/subsystem-util/src/backing_implicit_view.rs b/node/subsystem-util/src/backing_implicit_view.rs new file mode 100644 index 000000000000..bc95d50934c7 --- /dev/null +++ b/node/subsystem-util/src/backing_implicit_view.rs @@ -0,0 +1,209 @@ +// Copyright 2022 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. 
+ +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use futures::channel::oneshot; +use polkadot_node_subsystem::{ + messages::{ChainApiMessage, ProspectiveParachainsMessage}, + SubsystemSender, +}; +use polkadot_primitives::vstaging::{BlockNumber, Hash, Id as ParaId}; + +use std::collections::HashMap; + +/// Handles the implicit view of the relay chain derived from the immediate view, which +/// is composed of active leaves, and the minimum relay-parents allowed for +/// candidates of various parachains at those leaves. +#[derive(Default, Clone)] +pub struct View { + leaves: HashMap, + block_info_storage: HashMap, +} + +#[derive(Clone)] +struct ImplicitActiveLeafData { + // minimum relay parents can only be fetched for active leaves, + // so this will be empty for all blocks that haven't ever been + // witnessed as active leaves. + minimum_relay_parents: HashMap, + // The minimum of all minimum relay parents for all paras + // in `minimum_relay_parents` + minimum_relay_ancestor: BlockNumber, + number: BlockNumber, + // Ancestry, in descending order, starting from the parent block down + // to the `minimum_relay_ancestor`. + ancestry: Vec, +} + +#[derive(Clone)] +struct BlockInfo { + block_number: BlockNumber, + // If this was previously an active leaf, this will be `Some` + // and is useful for understanding the views of peers in the network + // which may not be in perfect synchrony with our own view. 
+ // + // If they are ahead of us in getting a new leaf, there's nothing we + // can do as it's an unrecognized block hash. But if they're behind us, + // it's useful for us to retain some information about previous leaves' + // implicit views so we can continue to send relevant messages to them + // until they catch up. + maybe_minimum_relay_parents: Option>, + parent_hash: Hash, +} + +impl View { + /// Update the view to a new view, preserving previous + pub async fn update(&mut self, sender: &mut Sender, new_view: Vec) + where + Sender: SubsystemSender, + Sender: SubsystemSender, + { + // Remove all leaves not present in the new view. + self.leaves.retain(|prev, _| new_view.contains(prev)); + + let fresh: Vec = { + new_view + .iter() + .filter(|head| !self.leaves.contains_key(head)) + .cloned() + .collect() + }; + + for leaf_hash in fresh { + let res = fetch_fresh_leaf_and_insert_ancestry( + leaf_hash, + &mut self.block_info_storage, + &mut *sender, + ) + .await; + + if let Some(x) = res { + self.leaves.insert(leaf_hash, x); + } + } + + // Prune everything before the minimum out of all leaves, + // pruning absolutely everything if there are no leaves (empty view) + // + // Pruning by block number does leave behind orphaned forks slightly longer + // but the memory overhead is negligible. 
+ { + let minimum = self.leaves.values().map(|l| l.minimum_relay_ancestor).min(); + + self.block_info_storage + .retain(|_, i| minimum.map_or(false, |m| i.block_number < m)); + } + } +} + +async fn fetch_fresh_leaf_and_insert_ancestry( + leaf_hash: Hash, + block_info_storage: &mut HashMap, + sender: &mut Sender, +) -> Option +where + Sender: SubsystemSender, + Sender: SubsystemSender, +{ + let min_relay_parents = { + let (tx, rx) = oneshot::channel(); + sender + .send_message(ProspectiveParachainsMessage::GetMinimumRelayParents(leaf_hash, tx)) + .await; + + match rx.await { + Ok(m) => m, + Err(_) => return None, + } + }; + + let leaf_header = { + let (tx, rx) = oneshot::channel(); + sender.send_message(ChainApiMessage::BlockHeader(leaf_hash, tx)).await; + + match rx.await { + Ok(Ok(Some(header))) => header, + Ok(Ok(None)) => return None, + Ok(Err(_)) => return None, + Err(_) => return None, + } + }; + + let min_min = min_relay_parents.iter().map(|x| x.1).min().unwrap_or(leaf_header.number); + + let leaf_block_info = BlockInfo { + parent_hash: leaf_header.parent_hash, + block_number: leaf_header.number, + maybe_minimum_relay_parents: Some(min_relay_parents.iter().cloned().collect()), + }; + + block_info_storage.insert(leaf_hash, leaf_block_info); + + let ancestry = if leaf_header.number > 0 { + let mut next_ancestor_number = leaf_header.number - 1; + let mut next_ancestor_hash = leaf_header.parent_hash; + let mut ancestry = Vec::with_capacity(leaf_header.number.saturating_sub(min_min) as _); + + // Ensure all ancestors up to and including `min_min` are in the + // block storage. When views advance incrementally, everything + // should already be present. + while next_ancestor_number >= min_min { + let parent_hash = if let Some(info) = block_info_storage.get(&next_ancestor_hash) { + info.parent_hash + } else { + // load the header and insert into block storage. 
+ let (tx, rx) = oneshot::channel(); + sender.send_message(ChainApiMessage::BlockHeader(next_ancestor_hash, tx)).await; + + let header = match rx.await { + Ok(Ok(Some(header))) => header, + Ok(Ok(None)) => break, + Ok(Err(_)) => break, + Err(_) => break, + }; + + block_info_storage.insert( + next_ancestor_hash, + BlockInfo { + block_number: next_ancestor_number, + parent_hash: header.parent_hash, + maybe_minimum_relay_parents: None, + }, + ); + + header.parent_hash + }; + + ancestry.push(next_ancestor_hash); + if next_ancestor_number == 0 { + break + } + + next_ancestor_number -= 1; + next_ancestor_hash = parent_hash; + } + + ancestry + } else { + Vec::new() + }; + + Some(ImplicitActiveLeafData { + minimum_relay_parents: min_relay_parents.iter().cloned().collect(), + minimum_relay_ancestor: min_min, + ancestry, + number: leaf_header.number, + }) +} diff --git a/node/subsystem-util/src/lib.rs b/node/subsystem-util/src/lib.rs index cb51f49a489b..4328e9acd054 100644 --- a/node/subsystem-util/src/lib.rs +++ b/node/subsystem-util/src/lib.rs @@ -78,6 +78,10 @@ pub mod reexports { pub use polkadot_overseer::gen::{SpawnNamed, SpawnedSubsystem, Subsystem, SubsystemContext}; } +/// A utility for managing the implicit view of the relay-chain derived from active +/// leaves and the minimum allowed relay-parents that parachain candidates can have +/// and be backed in those leaves' children. +pub mod backing_implicit_view; /// An emulator for node-side code to predict the results of on-chain parachain inclusion /// and predict future constraints. 
pub mod inclusion_emulator; From 7c58f7a89b3bb2766bee41dc3836b7657caa4466 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 23 May 2022 13:11:56 -0500 Subject: [PATCH 06/54] implicit-view: get allowed relay parents --- .../src/backing_implicit_view.rs | 110 +++++++++++++----- 1 file changed, 82 insertions(+), 28 deletions(-) diff --git a/node/subsystem-util/src/backing_implicit_view.rs b/node/subsystem-util/src/backing_implicit_view.rs index bc95d50934c7..ec9afc4e8da7 100644 --- a/node/subsystem-util/src/backing_implicit_view.rs +++ b/node/subsystem-util/src/backing_implicit_view.rs @@ -28,23 +28,46 @@ use std::collections::HashMap; /// candidates of various parachains at those leaves. #[derive(Default, Clone)] pub struct View { - leaves: HashMap, + leaves: HashMap, block_info_storage: HashMap, } +// Minimum relay parents implicitly relative to a particular block. #[derive(Clone)] -struct ImplicitActiveLeafData { +struct AllowedRelayParents { // minimum relay parents can only be fetched for active leaves, // so this will be empty for all blocks that haven't ever been // witnessed as active leaves. minimum_relay_parents: HashMap, + // Ancestry, in descending order, starting from the block hash itself down + // to and including the `minimum_relay_ancestor`. 
+ allowed_relay_parents_contiguous: Vec, +} + +impl AllowedRelayParents { + fn allowed_relay_parents_for(&self, para_id: ParaId, base_number: BlockNumber) -> &[Hash] { + let para_min = match self.minimum_relay_parents.get(¶_id) { + Some(p) => *p, + None => return &[], + }; + + if base_number < para_min { + return &[] + } + + let diff = base_number - para_min; + + // difference of 0 should lead to slice len of 1 + let slice_len = ((diff + 1) as usize).min(self.allowed_relay_parents_contiguous.len()); + &self.allowed_relay_parents_contiguous[..slice_len] + } +} + +#[derive(Clone)] +struct ActiveLeafMinAncestor { // The minimum of all minimum relay parents for all paras - // in `minimum_relay_parents` + // in `minimum_relay_parents` for this block. minimum_relay_ancestor: BlockNumber, - number: BlockNumber, - // Ancestry, in descending order, starting from the parent block down - // to the `minimum_relay_ancestor`. - ancestry: Vec, } #[derive(Clone)] @@ -59,12 +82,15 @@ struct BlockInfo { // it's useful for us to retain some information about previous leaves' // implicit views so we can continue to send relevant messages to them // until they catch up. - maybe_minimum_relay_parents: Option>, + maybe_allowed_relay_parents: Option, parent_hash: Hash, } impl View { - /// Update the view to a new view, preserving previous + /// Update the view to a new view, preserving any previous still-relevant information + /// about blocks. This will request the minimum relay parents from the + /// Prospective Parachains subsystem for each leaf and will load headers in the ancestry of each + /// leaf in the view as needed. pub async fn update(&mut self, sender: &mut Sender, new_view: Vec) where Sender: SubsystemSender, @@ -106,18 +132,44 @@ impl View { .retain(|_, i| minimum.map_or(false, |m| i.block_number < m)); } } + + /// Get the known, allowed relay-parents that are valid for parachain candidates + /// which could be backed in a child of a given block for a given para ID. 
+ /// + /// This is expressed as a contiguous slice of relay-chain block hashes which may + /// include the provided block hash itself. + /// + /// `None` indicates that the block hash isn't part of the implicit view or that + /// there are no known allowed relay parents. + /// + /// This always returns `Some` for active leaves or for blocks that previously + /// were active leaves. + /// + /// This can return the empty slice, which indicates that no relay-parents are allowed + /// for the para, e.g. if the para is not scheduled at the given block hash. + pub fn known_allowed_relay_parents_under( + &self, + block_hash: &Hash, + para_id: ParaId, + ) -> Option<&[Hash]> { + let block_info = self.block_info_storage.get(block_hash)?; + block_info + .maybe_allowed_relay_parents + .as_ref() + .map(|mins| mins.allowed_relay_parents_for(para_id, block_info.block_number)) + } } async fn fetch_fresh_leaf_and_insert_ancestry( leaf_hash: Hash, block_info_storage: &mut HashMap, sender: &mut Sender, -) -> Option +) -> Option where Sender: SubsystemSender, Sender: SubsystemSender, { - let min_relay_parents = { + let min_relay_parents_raw = { let (tx, rx) = oneshot::channel(); sender .send_message(ProspectiveParachainsMessage::GetMinimumRelayParents(leaf_hash, tx)) @@ -141,20 +193,14 @@ where } }; - let min_min = min_relay_parents.iter().map(|x| x.1).min().unwrap_or(leaf_header.number); - - let leaf_block_info = BlockInfo { - parent_hash: leaf_header.parent_hash, - block_number: leaf_header.number, - maybe_minimum_relay_parents: Some(min_relay_parents.iter().cloned().collect()), - }; - - block_info_storage.insert(leaf_hash, leaf_block_info); + let min_min = min_relay_parents_raw.iter().map(|x| x.1).min().unwrap_or(leaf_header.number); let ancestry = if leaf_header.number > 0 { let mut next_ancestor_number = leaf_header.number - 1; let mut next_ancestor_hash = leaf_header.parent_hash; - let mut ancestry = Vec::with_capacity(leaf_header.number.saturating_sub(min_min) as _); + let mut 
ancestry = + Vec::with_capacity((leaf_header.number.saturating_sub(min_min) as usize) + 1); + ancestry.push(leaf_hash); // Ensure all ancestors up to and including `min_min` are in the // block storage. When views advance incrementally, everything @@ -179,7 +225,7 @@ where BlockInfo { block_number: next_ancestor_number, parent_hash: header.parent_hash, - maybe_minimum_relay_parents: None, + maybe_allowed_relay_parents: None, }, ); @@ -200,10 +246,18 @@ where Vec::new() }; - Some(ImplicitActiveLeafData { - minimum_relay_parents: min_relay_parents.iter().cloned().collect(), - minimum_relay_ancestor: min_min, - ancestry, - number: leaf_header.number, - }) + let allowed_relay_parents = AllowedRelayParents { + minimum_relay_parents: min_relay_parents_raw.iter().cloned().collect(), + allowed_relay_parents_contiguous: ancestry, + }; + + let leaf_block_info = BlockInfo { + parent_hash: leaf_header.parent_hash, + block_number: leaf_header.number, + maybe_allowed_relay_parents: Some(allowed_relay_parents), + }; + + block_info_storage.insert(leaf_hash, leaf_block_info); + + Some(ActiveLeafMinAncestor { minimum_relay_ancestor: min_min }) } From fbcedac16aab498b30577d76b9ec5d6911518021 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 23 May 2022 13:38:36 -0500 Subject: [PATCH 07/54] refactorings and improvements to implicit view --- .../src/backing_implicit_view.rs | 117 ++++++++++++++---- 1 file changed, 95 insertions(+), 22 deletions(-) diff --git a/node/subsystem-util/src/backing_implicit_view.rs b/node/subsystem-util/src/backing_implicit_view.rs index ec9afc4e8da7..d819a0953f9f 100644 --- a/node/subsystem-util/src/backing_implicit_view.rs +++ b/node/subsystem-util/src/backing_implicit_view.rs @@ -16,6 +16,7 @@ use futures::channel::oneshot; use polkadot_node_subsystem::{ + errors::ChainApiError, messages::{ChainApiMessage, ProspectiveParachainsMessage}, SubsystemSender, }; @@ -23,12 +24,15 @@ use polkadot_primitives::vstaging::{BlockNumber, Hash, Id as ParaId}; 
use std::collections::HashMap; +// Always aim to retain 1 block before the active leaves. +const MINIMUM_RETAIN_LENGTH: BlockNumber = 2; + /// Handles the implicit view of the relay chain derived from the immediate view, which /// is composed of active leaves, and the minimum relay-parents allowed for /// candidates of various parachains at those leaves. #[derive(Default, Clone)] pub struct View { - leaves: HashMap, + leaves: HashMap, block_info_storage: HashMap, } @@ -40,7 +44,7 @@ struct AllowedRelayParents { // witnessed as active leaves. minimum_relay_parents: HashMap, // Ancestry, in descending order, starting from the block hash itself down - // to and including the `minimum_relay_ancestor`. + // to and including the minimum of `minimum_relay_parentes`. allowed_relay_parents_contiguous: Vec, } @@ -64,10 +68,10 @@ impl AllowedRelayParents { } #[derive(Clone)] -struct ActiveLeafMinAncestor { - // The minimum of all minimum relay parents for all paras - // in `minimum_relay_parents` for this block. - minimum_relay_ancestor: BlockNumber, +struct ActiveLeafPruningInfo { + // The mimimum block in the same branch of the relay-chain that should be + // preserved. + retain_minimum: BlockNumber, } #[derive(Clone)] @@ -91,8 +95,12 @@ impl View { /// about blocks. This will request the minimum relay parents from the /// Prospective Parachains subsystem for each leaf and will load headers in the ancestry of each /// leaf in the view as needed. 
- pub async fn update(&mut self, sender: &mut Sender, new_view: Vec) - where + pub async fn update( + &mut self, + sender: &mut Sender, + new_view: Vec, + observe_err: impl Fn(Hash, FetchError), + ) where Sender: SubsystemSender, Sender: SubsystemSender, { @@ -115,8 +123,16 @@ impl View { ) .await; - if let Some(x) = res { - self.leaves.insert(leaf_hash, x); + match res { + Ok(fetched) => { + let retain_minimum = std::cmp::max( + fetched.minimum_ancestor_number, + fetched.leaf_number.saturating_sub(MINIMUM_RETAIN_LENGTH), + ); + + self.leaves.insert(leaf_hash, ActiveLeafPruningInfo { retain_minimum }); + }, + Err(e) => observe_err(leaf_hash, e), } } @@ -126,7 +142,7 @@ impl View { // Pruning by block number does leave behind orphaned forks slightly longer // but the memory overhead is negligible. { - let minimum = self.leaves.values().map(|l| l.minimum_relay_ancestor).min(); + let minimum = self.leaves.values().map(|l| l.retain_minimum).min(); self.block_info_storage .retain(|_, i| minimum.map_or(false, |m| i.block_number < m)); @@ -160,11 +176,40 @@ impl View { } } +/// Errors when fetching a leaf and associated ancestry. +#[derive(Debug)] +pub enum FetchError { + /// The prospective parachains subsystem was uavailable. + ProspectiveParachainsUnavailable, + /// A block header was unavailable. + BlockHeaderUnavailable(Hash, BlockHeaderUnavailableReason), + /// A block header was unavailable due to a chain API error. + ChainApiError(Hash, ChainApiError), + /// The chain API subsystem was unavailable. + ChainApiUnavailable, +} + +/// Reasons a block header might have been unavailable. +#[derive(Debug)] +pub enum BlockHeaderUnavailableReason { + /// Block header simply unknown. + Unknown, + /// Internal Chain API error. + Internal(ChainApiError), + /// The subsystem was unavailable. 
+ SubsystemUnavailable, +} + +struct FetchSummary { + minimum_ancestor_number: BlockNumber, + leaf_number: BlockNumber, +} + async fn fetch_fresh_leaf_and_insert_ancestry( leaf_hash: Hash, block_info_storage: &mut HashMap, sender: &mut Sender, -) -> Option +) -> Result where Sender: SubsystemSender, Sender: SubsystemSender, @@ -177,7 +222,7 @@ where match rx.await { Ok(m) => m, - Err(_) => return None, + Err(_) => return Err(FetchError::ProspectiveParachainsUnavailable), } }; @@ -187,19 +232,32 @@ where match rx.await { Ok(Ok(Some(header))) => header, - Ok(Ok(None)) => return None, - Ok(Err(_)) => return None, - Err(_) => return None, + Ok(Ok(None)) => + return Err(FetchError::BlockHeaderUnavailable( + leaf_hash, + BlockHeaderUnavailableReason::Unknown, + )), + Ok(Err(e)) => + return Err(FetchError::BlockHeaderUnavailable( + leaf_hash, + BlockHeaderUnavailableReason::Internal(e), + )), + Err(_) => + return Err(FetchError::BlockHeaderUnavailable( + leaf_hash, + BlockHeaderUnavailableReason::SubsystemUnavailable, + )), } }; let min_min = min_relay_parents_raw.iter().map(|x| x.1).min().unwrap_or(leaf_header.number); + let expected_ancestry_len = (leaf_header.number.saturating_sub(min_min) as usize) + 1; let ancestry = if leaf_header.number > 0 { let mut next_ancestor_number = leaf_header.number - 1; let mut next_ancestor_hash = leaf_header.parent_hash; - let mut ancestry = - Vec::with_capacity((leaf_header.number.saturating_sub(min_min) as usize) + 1); + + let mut ancestry = Vec::with_capacity(expected_ancestry_len); ancestry.push(leaf_hash); // Ensure all ancestors up to and including `min_min` are in the @@ -215,9 +273,21 @@ where let header = match rx.await { Ok(Ok(Some(header))) => header, - Ok(Ok(None)) => break, - Ok(Err(_)) => break, - Err(_) => break, + Ok(Ok(None)) => + return Err(FetchError::BlockHeaderUnavailable( + next_ancestor_hash, + BlockHeaderUnavailableReason::Unknown, + )), + Ok(Err(e)) => + return Err(FetchError::BlockHeaderUnavailable( + 
next_ancestor_hash, + BlockHeaderUnavailableReason::Internal(e), + )), + Err(_) => + return Err(FetchError::BlockHeaderUnavailable( + next_ancestor_hash, + BlockHeaderUnavailableReason::SubsystemUnavailable, + )), }; block_info_storage.insert( @@ -246,6 +316,9 @@ where Vec::new() }; + let fetched_ancestry = + FetchSummary { minimum_ancestor_number: min_min, leaf_number: leaf_header.number }; + let allowed_relay_parents = AllowedRelayParents { minimum_relay_parents: min_relay_parents_raw.iter().cloned().collect(), allowed_relay_parents_contiguous: ancestry, @@ -259,5 +332,5 @@ where block_info_storage.insert(leaf_hash, leaf_block_info); - Some(ActiveLeafMinAncestor { minimum_relay_ancestor: min_min }) + Ok(fetched_ancestry) } From 126ed91ae8076819111781d0774052214d3044d0 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 23 May 2022 14:30:02 -0500 Subject: [PATCH 08/54] add some TODOs for tests --- node/subsystem-util/src/backing_implicit_view.rs | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/node/subsystem-util/src/backing_implicit_view.rs b/node/subsystem-util/src/backing_implicit_view.rs index d819a0953f9f..bcbcbde54711 100644 --- a/node/subsystem-util/src/backing_implicit_view.rs +++ b/node/subsystem-util/src/backing_implicit_view.rs @@ -334,3 +334,16 @@ where Ok(fetched_ancestry) } + +#[cfg(test)] +mod tests { + use super::*; + + // TODO [now]: test update into fresh view, and that it constructs `AllowedRelayParents` correctly + + // TODO [now]: test update that reuses some existing block info + + // TODO [now]: test pruning + + // TODO [now]: test that former leaves still have `AllowedRelayParents` +} From a5994a7782ec97d7ec1b17a51fb3d7c043faee45 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 25 May 2022 13:40:43 -0500 Subject: [PATCH 09/54] split implicit view updates into 2 functions --- .../src/backing_implicit_view.rs | 88 +++++++++++-------- 1 file changed, 52 insertions(+), 36 deletions(-) diff --git 
a/node/subsystem-util/src/backing_implicit_view.rs b/node/subsystem-util/src/backing_implicit_view.rs index bcbcbde54711..7ba5855b22fb 100644 --- a/node/subsystem-util/src/backing_implicit_view.rs +++ b/node/subsystem-util/src/backing_implicit_view.rs @@ -91,49 +91,58 @@ struct BlockInfo { } impl View { - /// Update the view to a new view, preserving any previous still-relevant information - /// about blocks. This will request the minimum relay parents from the + /// Activate a leaf in the view. + /// This will request the minimum relay parents from the /// Prospective Parachains subsystem for each leaf and will load headers in the ancestry of each - /// leaf in the view as needed. - pub async fn update( + /// leaf in the view as needed. These are the 'implicit ancestors' of the leaf. + /// + /// To maximize reuse of outdated leaves, it's best to activate new leaves before + /// deactivating old ones. + /// + /// This returns a list of para-ids which are relevant to the leaf, + /// and the allowed relay parents for these paras under this leaf can be + /// queried with [`known_allowed_relay_parents_under`]. + /// + /// No-op for known leaves. + pub async fn activate_leaf( &mut self, sender: &mut Sender, - new_view: Vec, - observe_err: impl Fn(Hash, FetchError), - ) where + leaf_hash: Hash, + ) -> Result, FetchError> + where Sender: SubsystemSender, Sender: SubsystemSender, { - // Remove all leaves not present in the new view. 
- self.leaves.retain(|prev, _| new_view.contains(prev)); - - let fresh: Vec = { - new_view - .iter() - .filter(|head| !self.leaves.contains_key(head)) - .cloned() - .collect() - }; + if self.leaves.contains_key(&leaf_hash) { + return Err(FetchError::AlreadyKnown) + } - for leaf_hash in fresh { - let res = fetch_fresh_leaf_and_insert_ancestry( - leaf_hash, - &mut self.block_info_storage, - &mut *sender, - ) - .await; + let res = fetch_fresh_leaf_and_insert_ancestry( + leaf_hash, + &mut self.block_info_storage, + &mut *sender, + ) + .await; + + match res { + Ok(fetched) => { + let retain_minimum = std::cmp::max( + fetched.minimum_ancestor_number, + fetched.leaf_number.saturating_sub(MINIMUM_RETAIN_LENGTH), + ); - match res { - Ok(fetched) => { - let retain_minimum = std::cmp::max( - fetched.minimum_ancestor_number, - fetched.leaf_number.saturating_sub(MINIMUM_RETAIN_LENGTH), - ); + self.leaves.insert(leaf_hash, ActiveLeafPruningInfo { retain_minimum }); - self.leaves.insert(leaf_hash, ActiveLeafPruningInfo { retain_minimum }); - }, - Err(e) => observe_err(leaf_hash, e), - } + Ok(fetched.relevant_paras) + }, + Err(e) => Err(e), + } + } + + /// Deactivate a leaf in the view. This prunes any outdated implicit ancestors as well. + pub fn deactivate_leaf(&mut self, leaf_hash: Hash) { + if self.leaves.remove(&leaf_hash).is_none() { + return } // Prune everything before the minimum out of all leaves, @@ -179,6 +188,8 @@ impl View { /// Errors when fetching a leaf and associated ancestry. #[derive(Debug)] pub enum FetchError { + /// Leaf was already known. + AlreadyKnown, /// The prospective parachains subsystem was uavailable. ProspectiveParachainsUnavailable, /// A block header was unavailable. 
@@ -203,6 +214,7 @@ pub enum BlockHeaderUnavailableReason { struct FetchSummary { minimum_ancestor_number: BlockNumber, leaf_number: BlockNumber, + relevant_paras: Vec, } async fn fetch_fresh_leaf_and_insert_ancestry( @@ -251,6 +263,7 @@ where }; let min_min = min_relay_parents_raw.iter().map(|x| x.1).min().unwrap_or(leaf_header.number); + let relevant_paras = min_relay_parents_raw.iter().map(|x| x.0).collect(); let expected_ancestry_len = (leaf_header.number.saturating_sub(min_min) as usize) + 1; let ancestry = if leaf_header.number > 0 { @@ -316,8 +329,11 @@ where Vec::new() }; - let fetched_ancestry = - FetchSummary { minimum_ancestor_number: min_min, leaf_number: leaf_header.number }; + let fetched_ancestry = FetchSummary { + minimum_ancestor_number: min_min, + leaf_number: leaf_header.number, + relevant_paras, + }; let allowed_relay_parents = AllowedRelayParents { minimum_relay_parents: min_relay_parents_raw.iter().cloned().collect(), From 8944760d6bd4f1f2e6a73db1ed390cf64234cf9d Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 25 May 2022 14:31:24 -0500 Subject: [PATCH 10/54] backing: define State to prepare for functional refactor --- node/core/backing/src/lib.rs | 181 ++++++++++++++++------------------- 1 file changed, 83 insertions(+), 98 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 0b712b58d8dc..4785e92ed4d1 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -19,7 +19,7 @@ #![deny(unused_crate_dependencies)] use std::{ - collections::{hash_map::Entry, HashMap, HashSet}, + collections::{BTreeMap, HashMap, HashSet}, sync::Arc, }; @@ -45,13 +45,14 @@ use polkadot_node_subsystem::{ Stage, SubsystemError, }; use polkadot_node_subsystem_util::{ - self as util, request_from_runtime, request_session_index_for_child, request_validator_groups, - request_validators, Validator, + self as util, backing_implicit_view::View as ImplicitView, request_from_runtime, + 
request_session_index_for_child, request_validator_groups, request_validators, Validator, }; use polkadot_primitives::v2::{ BackedCandidate, CandidateCommitments, CandidateHash, CandidateReceipt, CollatorId, CommittedCandidateReceipt, CoreIndex, CoreState, Hash, Id as ParaId, SessionIndex, SigningContext, ValidatorId, ValidatorIndex, ValidatorSignature, ValidityAttestation, + PersistedValidationData, }; use sp_keystore::SyncCryptoStorePtr; use statement_table::{ @@ -147,16 +148,83 @@ where } } -// The mode is determined on a per-relay-parent basis, based -// on the runtime API version. -enum Mode { - // This mode makes use of the prospective parachains subsystem, - // to participate in asynchronous backing. - ProspectiveParachains, - // This mode considers the 'base' block of the relay-chain only. - // This is a compatibility mode for the pre-asynchronous-backing - // era. - BaseOnly, +struct PerRelayParentState { + /// The hash of the relay parent on top of which this job is doing it's work. + parent: Hash, + /// The session index this corresponds to. + session_index: SessionIndex, + /// The `ParaId` assigned to the local validator at this relay parent. + assignment: Option, + /// The candidates that are backed by enough validators in their group, by hash. + backed: HashSet, + /// The table of candidates and statements under this relay-parent. + table: Table, + /// The table context, including groups. + table_context: TableContext, + /// We issued `Seconded` or `Valid` statements on about these candidates. + issued_statements: HashSet, + /// These candidates are undergoing validation in the background. + awaiting_validation: HashSet, + /// Data needed for retrying in case of `ValidatedCandidateCommand::AttestNoPoV`. + fallbacks: HashMap)>, + /// Spans for all candidates that are not yet backable. 
+ unbacked_candidates: HashMap, +} + +struct PerCandidateState { + persisted_validation_data: PersistedValidationData, + seconded_locally: bool, + para_id: ParaId, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +enum ProspectiveParachainsMode { + // v2 runtime API: no prospective parachains. + Disabled, + // vstaging runtime API: prospective parachains. + Enabled, +} + +impl ProspectiveParachainsMode { + fn is_disabled(&self) -> bool { + self == &ProspectiveParachainsMode::Disabled + } + + fn is_enabled(&self) -> bool { + self == &ProspectiveParachainsMode::Enabled + } +} + +struct ActiveLeafState { + prospective_parachains_mode: ProspectiveParachainsMode, + /// The candidates seconded at various depths under this active + /// leaf. A candidate can only be seconded when its hypothetical + /// depth under every active leaf has an empty entry in this map. + /// + /// When prospective parachains are disabled, the only depth + /// which is allowed is '0'. + seconded_at_depth: BTreeMap, +} + +/// The state of the subsystem. +struct State { + /// The utility for managing the implicit and explicit views ina consistent way. + /// + /// We only feed leaves which have prospective parachains enabled to this view. + implicit_view: ImplicitView, + /// State tracked for all active leaves, whether or not they have prospective parachains + /// enabled. + per_leaf: HashMap, + /// State tracked for all relay-parents backing work is ongoing for. This includes + /// all active leaves. + per_relay_parent: HashMap, + /// State tracked for all candidates relevant to the implicit view. + per_candidate: HashMap, + /// A cloneable sender which is dispatched to background candidate validation tasks to inform + /// the main task of the result. + background_validation_tx: mpsc::Sender<(Hash, ValidatedCandidateCommand)>, + /// The handle to the keystore used for signing. 
+ keystore: SyncCryptoStorePtr, } #[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] @@ -282,10 +350,6 @@ async fn handle_active_leaves_update( background_validation_tx: &mpsc::Sender<(Hash, ValidatedCandidateCommand)>, metrics: &Metrics, ) -> Result<(), Error> { - for deactivated in update.deactivated { - jobs.remove(&deactivated); - } - let leaf = match update.activated { None => return Ok(()), Some(a) => a, @@ -424,97 +488,18 @@ struct JobAndSpan { } struct ViewEntry { - ref_count: usize, job: Option>, } -#[derive(Debug, PartialEq)] -enum JobStatus { - Unneeded, - Needed, - Existing, -} struct View { - // Maps active-leaves to relevant ancestry, according to the - // prospective-parachains subsystem. - active_leaves: HashMap>, - // maps relay-parents to jobs and spans. implicit_view: HashMap>, } impl View { fn new() -> Self { - View { active_leaves: HashMap::new(), implicit_view: HashMap::new() } - } - - /// Add a leaf to the view, with the given implicit ancestry. 
- /// - /// Jobs may not already exist for the implicit ancestry, so - fn add_leaf_with_implicit_ancestry(&mut self, leaf: Hash, implicit_ancestry: Vec) { - let ancestry = match self.active_leaves.entry(leaf) { - Entry::Vacant(mut vacant) => vacant.insert(implicit_ancestry), - Entry::Occupied(_) => { - gum::debug!( - target: LOG_TARGET, - relay_parent = ?leaf, - "Attempted to add leaf to view more than once.", - ); - - return - }, - }; - - for fresh in ancestry.iter().cloned().chain(std::iter::once(leaf)) { - self.implicit_view - .entry(fresh) - .or_insert_with(|| ViewEntry { ref_count: 0, job: None }) - .ref_count += 1; - } - } - - /// Given a deactivated leaf, this does book-keeping on deactivated leaves - fn prune(&mut self, deactivated: Hash) { - if let Some(ancestry) = self.active_leaves.remove(&deactivated) { - for outdated in ancestry.into_iter().chain(std::iter::once(deactivated)) { - if let Entry::Occupied(mut entry) = self.implicit_view.entry(outdated) { - entry.get_mut().ref_count = entry.get().ref_count.saturating_sub(1); - if entry.get().ref_count == 0 { - let _ = entry.remove(); - } - } - } - } - } - - fn supply_needed_job(&mut self, relay_parent: Hash, job: JobAndSpan) { - if self.job_status(&relay_parent) != JobStatus::Needed { - gum::debug!( - target: LOG_TARGET, - ?relay_parent, - "Attempted to supply unneeded job to view." - ); - return - } - - // sanity: is always Some; guarded by job_status check above. - if let Some(x) = self.implicit_view.get_mut(&relay_parent) { - x.job = Some(job); - } - } - - /// The status of the job for a given relay-parent. 
- fn job_status(&self, relay_parent: &Hash) -> JobStatus { - match self.implicit_view.get(relay_parent) { - None => JobStatus::Unneeded, - Some(entry) => - if entry.job.is_some() { - JobStatus::Existing - } else { - JobStatus::Needed - }, - } + View { implicit_view: HashMap::new() } } fn job_mut<'a>(&'a mut self, relay_parent: &Hash) -> Option<&'a mut JobAndSpan> { @@ -707,7 +692,7 @@ async fn make_pov_available( n_validators: usize, pov: Arc, candidate_hash: CandidateHash, - validation_data: polkadot_primitives::v2::PersistedValidationData, + validation_data: PersistedValidationData, expected_erasure_root: Hash, span: Option<&jaeger::Span>, ) -> Result, Error> { From c34d0e6dbb4c7b8f75990f17e0ee0f1c37357bc2 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 25 May 2022 15:04:06 -0500 Subject: [PATCH 11/54] add some docs --- node/core/backing/src/lib.rs | 47 +++++++++++++++++++++++++++++++++++- 1 file changed, 46 insertions(+), 1 deletion(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 4785e92ed4d1..1c4114386755 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -14,7 +14,52 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Implements a `CandidateBackingSubsystem`. +//! Implements the `CandidateBackingSubsystem`. +//! +//! This subsystem maintains the entire responsibility of tracking parachain +//! candidates which can be backed, as well as the issuance of statements +//! about candidates when run on a validator node. +//! +//! There are two types of statements: `Seconded` and `Valid`. +//! `Seconded` implies `Valid`, and nothing should be stated as +//! `Valid` unless its already been `Seconded`. +//! +//! Validators may only second candidates which fall under their own group +//! assignment, and they may only second one candidate per depth per active leaf. +//! 
Candidates which are stated as either `Second` or `Valid` by a majority of the +//! assigned group of validators may be backed on-chain and proceed to the availability +//! stage. +//! +//! Depth is a concept relating to asynchronous backing, by which validators +//! short sub-chains of candidates are backed and extended off-chain, and then placed +//! asynchronously into blocks of the relay chain as those are authored and as the +//! relay-chain state becomes ready for them. +//! +//! Most of the work of asynchronous backing is handled by the Prospective Parachains +//! subsystem. The 'depth' of a parachain block with respect to a relay chain block is +//! a measure of how many parachain blocks are between the most recent included parachain block +//! in the post-state of the relay-chain block and the candidate. For instance, +//! a candidate that descends directly from the most recent parachain block in the relay-chain +//! state has depth 0. The child of that candidate would have depth 1. And so on. +//! +//! The candidate backing subsystem keeps track of a set of 'active leaves' which are the +//! most recent blocks in the relay-chain (which is in fact a tree) which could be built +//! upon. Depth is always measured against active leaves, and the valid relay-parent that +//! each candidate can have is determined by the active leaves. The Prospective Parachains +//! subsystem enforces that the relay-parent increases monotonoically, so that logic +//! is not handled here. By communicating with the Prospective Parachains subsystem, +//! this subsystem extrapolates an "implicit view" from the set of currently active leaves, +//! which determines the set of all recent relay-chain block hashes which could be relay-parents +//! for candidates backed in children of the active leaves. +//! +//! In fact, this subsystem relies on the Statement Distribution subsystem to prevent spam +//! 
by enforcing the rule that each validator may second at most one candidate per depth per +//! active leaf. This bounds the number of candidates that the system needs to consider and +//! is not handled within this subsystem, except for candidates seconded locally. +//! +//! This subsystem also handles relay-chain heads which don't support asynchronous backing. +//! For such active leaves, the only valid relay-parent is the leaf hash itself and the only +//! allowed depth is 0. #![deny(unused_crate_dependencies)] From 20cd422d5044ca0524e6ab33d6c3d223035b1029 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 25 May 2022 16:08:09 -0500 Subject: [PATCH 12/54] backing: implement bones of new leaf activation logic --- node/core/backing/src/lib.rs | 193 +++++++++++++++++- node/core/prospective-parachains/src/lib.rs | 3 + node/overseer/src/lib.rs | 2 + .../src/backing_implicit_view.rs | 17 +- .../src/runtime_api_impl/vstaging.rs | 3 + 5 files changed, 213 insertions(+), 5 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 1c4114386755..b1865f681158 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -33,7 +33,9 @@ //! Depth is a concept relating to asynchronous backing, by which validators //! short sub-chains of candidates are backed and extended off-chain, and then placed //! asynchronously into blocks of the relay chain as those are authored and as the -//! relay-chain state becomes ready for them. +//! relay-chain state becomes ready for them. Asynchronous backing allows parachains to +//! grow mostly independently from the state of the relay chain, which gives more time for +//! parachains to be validated and thereby increases performance. //! //! Most of the work of asynchronous backing is handled by the Prospective Parachains //! subsystem. 
The 'depth' of a parachain block with respect to a relay chain block is @@ -71,7 +73,8 @@ use std::{ use bitvec::vec::BitVec; use futures::{ channel::{mpsc, oneshot}, - FutureExt, SinkExt, StreamExt, + stream::FuturesOrdered, + FutureExt, SinkExt, StreamExt, TryFutureExt, }; use error::{Error, FatalResult}; @@ -85,12 +88,13 @@ use polkadot_node_subsystem::{ AvailabilityDistributionMessage, AvailabilityStoreMessage, CandidateBackingMessage, CandidateValidationMessage, CollatorProtocolMessage, DisputeCoordinatorMessage, ProvisionableData, ProvisionerMessage, RuntimeApiRequest, StatementDistributionMessage, + ProspectiveParachainsMessage, }, overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, PerLeafSpan, SpawnedSubsystem, Stage, SubsystemError, }; use polkadot_node_subsystem_util::{ - self as util, backing_implicit_view::View as ImplicitView, request_from_runtime, + self as util, backing_implicit_view::{FetchError as ImplicitViewFetchError, View as ImplicitView}, request_from_runtime, request_session_index_for_child, request_validator_groups, request_validators, Validator, }; use polkadot_primitives::v2::{ @@ -220,6 +224,7 @@ struct PerCandidateState { persisted_validation_data: PersistedValidationData, seconded_locally: bool, para_id: ParaId, + relay_parent: Hash, } #[derive(Debug, Clone, Copy, PartialEq)] @@ -386,6 +391,188 @@ async fn handle_communication( Ok(()) } +async fn prospective_parachains_mode( + _ctx: &mut Context, + _leaf_hash: Hash, +) -> ProspectiveParachainsMode { + // TODO [now]: this should be a runtime API version call + // cc https://github.com/paritytech/substrate/discussions/11338 + unimplemented!() +} + +// TODO [now]: rename once this is no longer 'new' +#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] +async fn handle_active_leaves_update_new( + ctx: &mut Context, + update: ActiveLeavesUpdate, + state: &mut State, + metrics: &Metrics, +) -> Result<(), Error> { + enum LeafHasProspectiveParachains 
{ + Enabled(Result, ImplicitViewFetchError>), + Disabled, + } + + // Activate in implicit view before deactivate, per the docs + // on ImplicitView, this is more efficient. + let res = if let Some(leaf) = update.activated { + // Only activate in implicit view if prospective + // parachains are enabled. + let mode = prospective_parachains_mode(ctx, leaf.hash).await; + + let leaf_hash = leaf.hash; + Some((leaf, match mode { + ProspectiveParachainsMode::Disabled => LeafHasProspectiveParachains::Disabled, + ProspectiveParachainsMode::Enabled => LeafHasProspectiveParachains::Enabled( + state.implicit_view.activate_leaf( + ctx.sender(), + leaf_hash, + ).await + ) + })) + } else { + None + }; + + for deactivated in update.deactivated { + state.per_leaf.remove(&deactivated); + state.implicit_view.deactivate_leaf(deactivated); + } + + // clean up `per_relay_parent` according to ancestry + // of leaves. we do this so we can clean up candidates right after + // as a result. + // + // when prospective parachains are disabled, the implicit view is empty, + // which means we'll clean up everything. This is correct. + for relay_parent in state.implicit_view.all_allowed_relay_parents() { + state.per_relay_parent.remove(relay_parent); + } + + // clean up `per_candidate` according to which relay-parents + // are known. + // + // when prospective parachains are disabled, we clean up all candidates + // because we've cleaned up all relay parents. this is correct. + state.per_candidate.retain(|_, pc| state.per_relay_parent.contains_key(&pc.relay_parent)); + + // Get relay parents which might be fresh but might be known already + // that are explicit or implicit from the new active leaf. + let fresh_relay_parents = match res { + None => return Ok(()), + Some((leaf, LeafHasProspectiveParachains::Disabled)) => { + // defensive in this case - for enabled, this manifests as an error. 
+ if state.per_leaf.contains_key(&leaf.hash) { return Ok(()) } + + state.per_leaf.insert( + leaf.hash, + ActiveLeafState { + prospective_parachains_mode: ProspectiveParachainsMode::Disabled, + // This is empty because the only allowed relay-parent and depth + // when prospective parachains are disabled is the leaf hash and 0, + // respectively. We've just learned about the leaf hash, so we cannot + // have any candidates seconded with it as a relay-parent yet. + seconded_at_depth: BTreeMap::new(), + } + ); + + vec![leaf.hash] + } + Some((leaf, LeafHasProspectiveParachains::Enabled(Ok(_)))) => { + let fresh_relay_parents = state.implicit_view.known_allowed_relay_parents_under( + &leaf.hash, + None, + ); + + // At this point, all candidates outside of the implicit view + // have been cleaned up. For all which remain, which we've seconded, + // we ask the prospective parachains subsystem where they land in the fragment + // tree for the given active leaf. This comprises our `seconded_at_depth`. + + let remaining_seconded = state.per_candidate.iter() + .filter(|(_, cd)| cd.seconded_locally) + .map(|(c_hash, cd)| (*c_hash, cd.para_id)); + + // one-to-one correspondence to remaining_seconded + let mut membership_answers = FuturesOrdered::new(); + + for (candidate_hash, para_id) in remaining_seconded { + let (tx, rx) = oneshot::channel(); + membership_answers.push(rx.map_ok(move |membership| (candidate_hash, membership))); + + ctx.send_message( + ProspectiveParachainsMessage::GetTreeMembership(para_id, candidate_hash, tx) + ).await; + } + + let mut seconded_at_depth = BTreeMap::new(); + for response in membership_answers.next().await { + match response { + Err(oneshot::Canceled) => { + gum::warn!( + target: LOG_TARGET, + "Prospective parachains subsystem unreachable for membership request", + ); + + continue + } + Ok((candidate_hash, membership)) => { + // This request gives membership in all fragment trees. 
We have some + // wasted data here, and it can be optimized if it proves + // relevant to performance. + if let Some((_, depths)) = membership + .into_iter() + .find(|(leaf_hash, _)| leaf_hash == &leaf.hash) + { + for depth in depths { + seconded_at_depth.insert(depth, candidate_hash); + } + } + } + } + } + + state.per_leaf.insert(leaf.hash, ActiveLeafState { + prospective_parachains_mode: ProspectiveParachainsMode::Enabled, + seconded_at_depth, + }); + + match fresh_relay_parents { + Some(f) => f.to_vec(), + None => { + gum::warn!( + target: LOG_TARGET, + leaf_hash = ?leaf.hash, + "Implicit view gave no relay-parents" + ); + + vec![leaf.hash] + } + } + } + Some((leaf, LeafHasProspectiveParachains::Enabled(Err(e)))) => { + gum::debug!( + target: LOG_TARGET, + leaf_hash = ?leaf.hash, + err = ?e, + "Failed to load implicit view for leaf." + ); + + return Ok(()) + } + }; + + // add entries in `per_relay_parent`. for all new relay-parents. + for maybe_new in fresh_relay_parents { + if state.per_relay_parent.contains_key(&maybe_new) { continue } + + // TODO [now]: construct a `PerRelayParent` from the runtime API + // and insert it. + } + + Ok(()) +} + #[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] async fn handle_active_leaves_update( ctx: &mut Context, diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index 1ddc84b230b0..1043d29c9bee 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -163,6 +163,9 @@ async fn handle_active_leaves_update( } for activated in update.activated.into_iter() { + // TODO [now]: skip leaves which don't have prospective parachains + // enabled. This should be a runtime API version check. 
+ let hash = activated.hash; let scheduled_paras = fetch_upcoming_paras(&mut *ctx, hash).await?; diff --git a/node/overseer/src/lib.rs b/node/overseer/src/lib.rs index 21cd09aee03d..0c63a00974a3 100644 --- a/node/overseer/src/lib.rs +++ b/node/overseer/src/lib.rs @@ -430,12 +430,14 @@ pub struct Overseer { #[subsystem(CandidateBackingMessage, sends: [ CandidateValidationMessage, CollatorProtocolMessage, + ChainApiMessage, AvailabilityDistributionMessage, AvailabilityStoreMessage, StatementDistributionMessage, ProvisionerMessage, RuntimeApiMessage, DisputeCoordinatorMessage, + ProspectiveParachainsMessage, ])] candidate_backing: CandidateBacking, diff --git a/node/subsystem-util/src/backing_implicit_view.rs b/node/subsystem-util/src/backing_implicit_view.rs index 7ba5855b22fb..d87838281dc1 100644 --- a/node/subsystem-util/src/backing_implicit_view.rs +++ b/node/subsystem-util/src/backing_implicit_view.rs @@ -49,7 +49,12 @@ struct AllowedRelayParents { } impl AllowedRelayParents { - fn allowed_relay_parents_for(&self, para_id: ParaId, base_number: BlockNumber) -> &[Hash] { + fn allowed_relay_parents_for(&self, para_id: Option, base_number: BlockNumber) -> &[Hash] { + let para_id = match para_id { + None => return &self.allowed_relay_parents_contiguous[..], + Some(p) => p, + }; + let para_min = match self.minimum_relay_parents.get(¶_id) { Some(p) => *p, None => return &[], @@ -158,12 +163,20 @@ impl View { } } + /// Get an iterator over all allowed relay-parents in the view. + pub fn all_allowed_relay_parents<'a>(&'a self) -> impl Iterator + 'a { + self.block_info_storage.keys() + } + /// Get the known, allowed relay-parents that are valid for parachain candidates /// which could be backed in a child of a given block for a given para ID. /// /// This is expressed as a contiguous slice of relay-chain block hashes which may /// include the provided block hash itself. 
/// + /// If `para_id` is `None`, this returns all valid relay-parents across all paras + /// for the leaf. + /// /// `None` indicates that the block hash isn't part of the implicit view or that /// there are no known allowed relay parents. /// @@ -175,7 +188,7 @@ impl View { pub fn known_allowed_relay_parents_under( &self, block_hash: &Hash, - para_id: ParaId, + para_id: Option, ) -> Option<&[Hash]> { let block_info = self.block_info_storage.get(block_hash)?; block_info diff --git a/runtime/parachains/src/runtime_api_impl/vstaging.rs b/runtime/parachains/src/runtime_api_impl/vstaging.rs index 8715cdc53121..eb98893708ff 100644 --- a/runtime/parachains/src/runtime_api_impl/vstaging.rs +++ b/runtime/parachains/src/runtime_api_impl/vstaging.rs @@ -25,3 +25,6 @@ pub fn get_session_disputes( ) -> Vec<(SessionIndex, CandidateHash, DisputeState)> { >::disputes() } + +// TODO [now]: implicit `validity_constraints`. Ensure that `min_relay_parent` +// never goes lower than the point at which asynchronous backing was enabled. 
From 2b7d883f2585fadd4c044d8d751901d0108c541b Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 25 May 2022 16:24:14 -0500 Subject: [PATCH 13/54] backing: create per-relay-parent-states --- node/core/backing/src/lib.rs | 193 +++++++++--------- .../src/backing_implicit_view.rs | 6 +- .../src/runtime_api_impl/vstaging.rs | 1 + 3 files changed, 101 insertions(+), 99 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index b1865f681158..9fa398d3a14c 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -87,21 +87,23 @@ use polkadot_node_subsystem::{ messages::{ AvailabilityDistributionMessage, AvailabilityStoreMessage, CandidateBackingMessage, CandidateValidationMessage, CollatorProtocolMessage, DisputeCoordinatorMessage, - ProvisionableData, ProvisionerMessage, RuntimeApiRequest, StatementDistributionMessage, - ProspectiveParachainsMessage, + ProspectiveParachainsMessage, ProvisionableData, ProvisionerMessage, RuntimeApiRequest, + StatementDistributionMessage, }, overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, PerLeafSpan, SpawnedSubsystem, Stage, SubsystemError, }; use polkadot_node_subsystem_util::{ - self as util, backing_implicit_view::{FetchError as ImplicitViewFetchError, View as ImplicitView}, request_from_runtime, - request_session_index_for_child, request_validator_groups, request_validators, Validator, + self as util, + backing_implicit_view::{FetchError as ImplicitViewFetchError, View as ImplicitView}, + request_from_runtime, request_session_index_for_child, request_validator_groups, + request_validators, Validator, }; use polkadot_primitives::v2::{ BackedCandidate, CandidateCommitments, CandidateHash, CandidateReceipt, CollatorId, - CommittedCandidateReceipt, CoreIndex, CoreState, Hash, Id as ParaId, SessionIndex, - SigningContext, ValidatorId, ValidatorIndex, ValidatorSignature, ValidityAttestation, - PersistedValidationData, + CommittedCandidateReceipt, CoreIndex, 
CoreState, Hash, Id as ParaId, PersistedValidationData, + SessionIndex, SigningContext, ValidatorId, ValidatorIndex, ValidatorSignature, + ValidityAttestation, }; use sp_keystore::SyncCryptoStorePtr; use statement_table::{ @@ -216,8 +218,6 @@ struct PerRelayParentState { awaiting_validation: HashSet, /// Data needed for retrying in case of `ValidatedCandidateCommand::AttestNoPoV`. fallbacks: HashMap)>, - /// Spans for all candidates that are not yet backable. - unbacked_candidates: HashMap, } struct PerCandidateState { @@ -421,15 +421,15 @@ async fn handle_active_leaves_update_new( let mode = prospective_parachains_mode(ctx, leaf.hash).await; let leaf_hash = leaf.hash; - Some((leaf, match mode { - ProspectiveParachainsMode::Disabled => LeafHasProspectiveParachains::Disabled, - ProspectiveParachainsMode::Enabled => LeafHasProspectiveParachains::Enabled( - state.implicit_view.activate_leaf( - ctx.sender(), - leaf_hash, - ).await - ) - })) + Some(( + leaf, + match mode { + ProspectiveParachainsMode::Disabled => LeafHasProspectiveParachains::Disabled, + ProspectiveParachainsMode::Enabled => LeafHasProspectiveParachains::Enabled( + state.implicit_view.activate_leaf(ctx.sender(), leaf_hash).await, + ), + }, + )) } else { None }; @@ -454,7 +454,9 @@ async fn handle_active_leaves_update_new( // // when prospective parachains are disabled, we clean up all candidates // because we've cleaned up all relay parents. this is correct. - state.per_candidate.retain(|_, pc| state.per_relay_parent.contains_key(&pc.relay_parent)); + state + .per_candidate + .retain(|_, pc| state.per_relay_parent.contains_key(&pc.relay_parent)); // Get relay parents which might be fresh but might be known already // that are explicit or implicit from the new active leaf. @@ -462,7 +464,9 @@ async fn handle_active_leaves_update_new( None => return Ok(()), Some((leaf, LeafHasProspectiveParachains::Disabled)) => { // defensive in this case - for enabled, this manifests as an error. 
- if state.per_leaf.contains_key(&leaf.hash) { return Ok(()) } + if state.per_leaf.contains_key(&leaf.hash) { + return Ok(()) + } state.per_leaf.insert( leaf.hash, @@ -473,23 +477,23 @@ async fn handle_active_leaves_update_new( // respectively. We've just learned about the leaf hash, so we cannot // have any candidates seconded with it as a relay-parent yet. seconded_at_depth: BTreeMap::new(), - } + }, ); vec![leaf.hash] - } + }, Some((leaf, LeafHasProspectiveParachains::Enabled(Ok(_)))) => { - let fresh_relay_parents = state.implicit_view.known_allowed_relay_parents_under( - &leaf.hash, - None, - ); + let fresh_relay_parents = + state.implicit_view.known_allowed_relay_parents_under(&leaf.hash, None); // At this point, all candidates outside of the implicit view // have been cleaned up. For all which remain, which we've seconded, // we ask the prospective parachains subsystem where they land in the fragment // tree for the given active leaf. This comprises our `seconded_at_depth`. - let remaining_seconded = state.per_candidate.iter() + let remaining_seconded = state + .per_candidate + .iter() .filter(|(_, cd)| cd.seconded_locally) .map(|(c_hash, cd)| (*c_hash, cd.para_id)); @@ -500,9 +504,12 @@ async fn handle_active_leaves_update_new( let (tx, rx) = oneshot::channel(); membership_answers.push(rx.map_ok(move |membership| (candidate_hash, membership))); - ctx.send_message( - ProspectiveParachainsMessage::GetTreeMembership(para_id, candidate_hash, tx) - ).await; + ctx.send_message(ProspectiveParachainsMessage::GetTreeMembership( + para_id, + candidate_hash, + tx, + )) + .await; } let mut seconded_at_depth = BTreeMap::new(); @@ -515,27 +522,29 @@ async fn handle_active_leaves_update_new( ); continue - } + }, Ok((candidate_hash, membership)) => { // This request gives membership in all fragment trees. We have some // wasted data here, and it can be optimized if it proves // relevant to performance. 
- if let Some((_, depths)) = membership - .into_iter() - .find(|(leaf_hash, _)| leaf_hash == &leaf.hash) + if let Some((_, depths)) = + membership.into_iter().find(|(leaf_hash, _)| leaf_hash == &leaf.hash) { for depth in depths { seconded_at_depth.insert(depth, candidate_hash); } } - } + }, } } - state.per_leaf.insert(leaf.hash, ActiveLeafState { - prospective_parachains_mode: ProspectiveParachainsMode::Enabled, - seconded_at_depth, - }); + state.per_leaf.insert( + leaf.hash, + ActiveLeafState { + prospective_parachains_mode: ProspectiveParachainsMode::Enabled, + seconded_at_depth, + }, + ); match fresh_relay_parents { Some(f) => f.to_vec(), @@ -547,9 +556,9 @@ async fn handle_active_leaves_update_new( ); vec![leaf.hash] - } + }, } - } + }, Some((leaf, LeafHasProspectiveParachains::Enabled(Err(e)))) => { gum::debug!( target: LOG_TARGET, @@ -559,37 +568,34 @@ async fn handle_active_leaves_update_new( ); return Ok(()) - } + }, }; // add entries in `per_relay_parent`. for all new relay-parents. for maybe_new in fresh_relay_parents { - if state.per_relay_parent.contains_key(&maybe_new) { continue } + if state.per_relay_parent.contains_key(&maybe_new) { + continue + } - // TODO [now]: construct a `PerRelayParent` from the runtime API + // construct a `PerRelayParent` from the runtime API // and insert it. + let per = construct_per_relay_parent_state(ctx, maybe_new, &state.keystore).await?; + + if let Some(per) = per { + state.per_relay_parent.insert(maybe_new, per); + } } Ok(()) } +/// Load the data necessary to do backing work on top of a relay-parent. 
#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] -async fn handle_active_leaves_update( +async fn construct_per_relay_parent_state( ctx: &mut Context, - update: ActiveLeavesUpdate, - view: &mut View, + relay_parent: Hash, keystore: &SyncCryptoStorePtr, - background_validation_tx: &mpsc::Sender<(Hash, ValidatedCandidateCommand)>, - metrics: &Metrics, -) -> Result<(), Error> { - let leaf = match update.activated { - None => return Ok(()), - Some(a) => a, - }; - - // TODO [now]: update view. no ancestry if mode is not - // `ProspectiveParachains`. - +) -> Result, Error> { macro_rules! try_runtime_api { ($x: expr) => { match $x { @@ -604,15 +610,13 @@ async fn handle_active_leaves_update( // We can't do candidate validation work if we don't have the // requisite runtime API data. But these errors should not take // down the node. - return Ok(()); + return Ok(None); } } } } - let parent = leaf.hash; - let span = PerLeafSpan::new(leaf.span, "backing"); - let _span = span.child("runtime-apis"); + let parent = relay_parent; let (validators, groups, session_index, cores) = futures::try_join!( request_validators(parent, ctx.sender()).await, @@ -630,9 +634,6 @@ async fn handle_active_leaves_update( let session_index = try_runtime_api!(session_index); let cores = try_runtime_api!(cores); - drop(_span); - let _span = span.child("validator-construction"); - let signing_context = SigningContext { parent_hash: parent, session_index }; let validator = match Validator::construct(&validators, signing_context.clone(), keystore.clone()).await { @@ -645,17 +646,12 @@ async fn handle_active_leaves_update( "Cannot participate in candidate backing", ); - return Ok(()) + return Ok(None) }, }; - drop(_span); - let mut assignments_span = span.child("compute-assignments"); - let mut groups = HashMap::new(); - let n_cores = cores.len(); - let mut assignment = None; for (idx, core) in cores.into_iter().enumerate() { @@ -674,41 +670,43 @@ async fn handle_active_leaves_update( 
let table_context = TableContext { groups, validators, validator }; - let (assignment, required_collator) = match assignment { - None => { - assignments_span.add_string_tag("assigned", "false"); - (None, None) - }, - Some((assignment, required_collator)) => { - assignments_span.add_string_tag("assigned", "true"); - assignments_span.add_para_id(assignment); - (Some(assignment), required_collator) - }, - }; - - drop(assignments_span); - let _span = span.child("wait-for-job"); + // TODO [now]: I've removed the `required_collator` more broadly, + // because it's not used in practice and was intended for parathreads. + // + // We should attempt parathreads another way, I think, so it makes sense + // to remove. + let assignment = assignment.map(|(a, _required_collator)| a); - // TODO [now] bound unneeded - let job = CandidateBackingJob:: { + Ok(Some(PerRelayParentState { parent, session_index, assignment, - required_collator, - issued_statements: HashSet::new(), - awaiting_validation: HashSet::new(), - fallbacks: HashMap::new(), - seconded: None, - unbacked_candidates: HashMap::new(), backed: HashSet::new(), - keystore: keystore.clone(), table: Table::default(), table_context, - background_validation_tx: background_validation_tx.clone(), - metrics: metrics.clone(), - _marker: std::marker::PhantomData, + issued_statements: HashSet::new(), + awaiting_validation: HashSet::new(), + fallbacks: HashMap::new(), + })) +} + +#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] +async fn handle_active_leaves_update( + ctx: &mut Context, + update: ActiveLeavesUpdate, + view: &mut View, + keystore: &SyncCryptoStorePtr, + background_validation_tx: &mpsc::Sender<(Hash, ValidatedCandidateCommand)>, + metrics: &Metrics, +) -> Result<(), Error> { + let leaf = match update.activated { + None => return Ok(()), + Some(a) => a, }; + // TODO [now]: update view. no ancestry if mode is not + // `ProspectiveParachains`. 
+ // TODO [now] view.insert(parent, JobAndSpan { job, span }); Ok(()) @@ -723,7 +721,6 @@ struct ViewEntry { job: Option>, } - struct View { // maps relay-parents to jobs and spans. implicit_view: HashMap>, diff --git a/node/subsystem-util/src/backing_implicit_view.rs b/node/subsystem-util/src/backing_implicit_view.rs index d87838281dc1..10dd0a1290d5 100644 --- a/node/subsystem-util/src/backing_implicit_view.rs +++ b/node/subsystem-util/src/backing_implicit_view.rs @@ -49,7 +49,11 @@ struct AllowedRelayParents { } impl AllowedRelayParents { - fn allowed_relay_parents_for(&self, para_id: Option, base_number: BlockNumber) -> &[Hash] { + fn allowed_relay_parents_for( + &self, + para_id: Option, + base_number: BlockNumber, + ) -> &[Hash] { let para_id = match para_id { None => return &self.allowed_relay_parents_contiguous[..], Some(p) => p, diff --git a/runtime/parachains/src/runtime_api_impl/vstaging.rs b/runtime/parachains/src/runtime_api_impl/vstaging.rs index eb98893708ff..edde48f4d984 100644 --- a/runtime/parachains/src/runtime_api_impl/vstaging.rs +++ b/runtime/parachains/src/runtime_api_impl/vstaging.rs @@ -28,3 +28,4 @@ pub fn get_session_disputes( // TODO [now]: implicit `validity_constraints`. Ensure that `min_relay_parent` // never goes lower than the point at which asynchronous backing was enabled. +// Also, never cross session boundaries. 
From 923b28af55013d71a85259ec7e6c4fe6947d394f Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 25 May 2022 16:28:51 -0500 Subject: [PATCH 14/54] use new handle_active_leaves_update --- node/core/backing/src/lib.rs | 100 +++++++++++++++-------------------- 1 file changed, 44 insertions(+), 56 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 9fa398d3a14c..14ddcbc5295c 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -277,6 +277,22 @@ struct State { keystore: SyncCryptoStorePtr, } +impl State { + fn new( + background_validation_tx: mpsc::Sender<(Hash, ValidatedCandidateCommand)>, + keystore: SyncCryptoStorePtr, + ) -> Self { + State { + implicit_view: ImplicitView::default(), + per_leaf: HashMap::default(), + per_relay_parent: HashMap::default(), + per_candidate: HashMap::new(), + background_validation_tx, + keystore, + } + } +} + #[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] async fn run( mut ctx: Context, @@ -284,18 +300,11 @@ async fn run( metrics: Metrics, ) -> FatalResult<()> { let (background_validation_tx, mut background_validation_rx) = mpsc::channel(16); - let mut view = View::new(); + let mut state = State::new(background_validation_tx, keystore); loop { - let res = run_iteration( - &mut ctx, - keystore.clone(), - &metrics, - &mut view, - background_validation_tx.clone(), - &mut background_validation_rx, - ) - .await; + let res = + run_iteration(&mut ctx, &mut state, &metrics, &mut background_validation_rx).await; match res { Ok(()) => break, @@ -309,39 +318,41 @@ async fn run( #[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] async fn run_iteration( ctx: &mut Context, - keystore: SyncCryptoStorePtr, + state: &mut State, metrics: &Metrics, - view: &mut View, - background_validation_tx: mpsc::Sender<(Hash, ValidatedCandidateCommand)>, background_validation_rx: &mut mpsc::Receiver<(Hash, ValidatedCandidateCommand)>, ) -> Result<(), 
Error> { loop { futures::select!( validated_command = background_validation_rx.next().fuse() => { - if let Some((relay_parent, command)) = validated_command { - handle_validated_candidate_command( - &mut *ctx, - view, - relay_parent, - command, - ).await?; - } else { - panic!("background_validation_tx always alive at this point; qed"); - } + // TODO [now] + // if let Some((relay_parent, command)) = validated_command { + // handle_validated_candidate_command( + // &mut *ctx, + // view, + // relay_parent, + // command, + // ).await?; + // } else { + // panic!("background_validation_tx always alive at this point; qed"); + // } } from_overseer = ctx.recv().fuse() => { match from_overseer? { - FromOverseer::Signal(OverseerSignal::ActiveLeaves(update)) => handle_active_leaves_update( - &mut *ctx, - update, - view, - &keystore, - &background_validation_tx, - &metrics, - ).await?, + FromOverseer::Signal(OverseerSignal::ActiveLeaves(update)) => { + handle_active_leaves_update( + &mut *ctx, + update, + state, + &metrics, + ).await?; + } FromOverseer::Signal(OverseerSignal::BlockFinalized(..)) => {} FromOverseer::Signal(OverseerSignal::Conclude) => return Ok(()), - FromOverseer::Communication { msg } => handle_communication(&mut *ctx, view, msg).await?, + FromOverseer::Communication { msg } => { + // TODO [now] + // handle_communication(&mut *ctx, view, msg).await?, + } } } ) @@ -400,9 +411,8 @@ async fn prospective_parachains_mode( unimplemented!() } -// TODO [now]: rename once this is no longer 'new' #[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] -async fn handle_active_leaves_update_new( +async fn handle_active_leaves_update( ctx: &mut Context, update: ActiveLeavesUpdate, state: &mut State, @@ -690,28 +700,6 @@ async fn construct_per_relay_parent_state( })) } -#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] -async fn handle_active_leaves_update( - ctx: &mut Context, - update: ActiveLeavesUpdate, - view: &mut View, - keystore: 
&SyncCryptoStorePtr, - background_validation_tx: &mpsc::Sender<(Hash, ValidatedCandidateCommand)>, - metrics: &Metrics, -) -> Result<(), Error> { - let leaf = match update.activated { - None => return Ok(()), - Some(a) => a, - }; - - // TODO [now]: update view. no ancestry if mode is not - // `ProspectiveParachains`. - - // TODO [now] view.insert(parent, JobAndSpan { job, span }); - - Ok(()) -} - struct JobAndSpan { job: CandidateBackingJob, span: PerLeafSpan, From b9280d11643a99c927620ce4cc4f00780654bd76 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 27 May 2022 16:57:34 -0500 Subject: [PATCH 15/54] begin extracting logic from CandidateBackingJob --- node/core/backing/src/lib.rs | 218 ++++++++++++++++++----------------- 1 file changed, 111 insertions(+), 107 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 14ddcbc5295c..7284a717be60 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -325,17 +325,17 @@ async fn run_iteration( loop { futures::select!( validated_command = background_validation_rx.next().fuse() => { - // TODO [now] - // if let Some((relay_parent, command)) = validated_command { - // handle_validated_candidate_command( - // &mut *ctx, - // view, - // relay_parent, - // command, - // ).await?; - // } else { - // panic!("background_validation_tx always alive at this point; qed"); - // } + if let Some((relay_parent, command)) = validated_command { + handle_validated_candidate_command( + &mut *ctx, + state, + relay_parent, + command, + metrics, + ).await?; + } else { + panic!("background_validation_tx always alive at this point; qed"); + } } from_overseer = ctx.recv().fuse() => { match from_overseer? 
{ @@ -359,23 +359,6 @@ async fn run_iteration( } } -#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] -async fn handle_validated_candidate_command( - ctx: &mut Context, - view: &mut View, - relay_parent: Hash, - command: ValidatedCandidateCommand, -) -> Result<(), Error> { - if let Some(job) = view.job_mut(&relay_parent) { - job.job.handle_validated_candidate_command(&job.span, ctx, command).await?; - } else { - // simple race condition; can be ignored - this relay-parent - // is no longer relevant. - } - - Ok(()) -} - #[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] async fn handle_communication( ctx: &mut Context, @@ -408,7 +391,7 @@ async fn prospective_parachains_mode( ) -> ProspectiveParachainsMode { // TODO [now]: this should be a runtime API version call // cc https://github.com/paritytech/substrate/discussions/11338 - unimplemented!() + ProspectiveParachainsMode::Disabled } #[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] @@ -700,6 +683,105 @@ async fn construct_per_relay_parent_state( })) } +#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] +async fn handle_validated_candidate_command( + ctx: &mut Context, + state: &mut State, + relay_parent: Hash, + command: ValidatedCandidateCommand, + metrics: &Metrics, +) -> Result<(), Error> { + match state.per_relay_parent.get_mut(&relay_parent) { + Some(rp_state) => { + let candidate_hash = command.candidate_hash(); + rp_state.awaiting_validation.remove(&candidate_hash); + + match command { + ValidatedCandidateCommand::Second(res) => match res { + Ok((candidate, commitments, _)) => { + // sanity check. + // TODO [now]: this sanity check is almost certainly + // outdated - we now allow seconding multiple candidates + // per relay-parent. update it to properly defend against + // seconding stuff wrongly. + if !rp_state.issued_statements.contains(&candidate_hash) + { + // TODO [now]: note the candidate as seconded. 
+ rp_state.issued_statements.insert(candidate_hash); + metrics.on_candidate_seconded(); + + let statement = Statement::Seconded(CommittedCandidateReceipt { + descriptor: candidate.descriptor.clone(), + commitments, + }); + + + // TODO [now]: + // implement `sign_import_and_distribute_statement` + // for PerRelayParentState. + // + // if let Some(stmt) = self + // .sign_import_and_distribute_statement(ctx, statement, root_span) + // .await? + // { + // ctx.send_message(CollatorProtocolMessage::Seconded( + // rp_state.parent, + // stmt, + // )) + // .await; + // } + } + }, + Err(candidate) => { + ctx.send_message(CollatorProtocolMessage::Invalid(rp_state.parent, candidate)) + .await; + }, + }, + ValidatedCandidateCommand::Attest(res) => { + // We are done - avoid new validation spawns: + rp_state.fallbacks.remove(&candidate_hash); + // sanity check. + if !rp_state.issued_statements.contains(&candidate_hash) { + if res.is_ok() { + let statement = Statement::Valid(candidate_hash); + + // TODO [now]: needs implementation of `sign_import_and_distribute` + // self.sign_import_and_distribute_statement(ctx, statement, &root_span) + // .await?; + } + rp_state.issued_statements.insert(candidate_hash); + } + } + ValidatedCandidateCommand::AttestNoPoV(candidate_hash) => { + if let Some((attesting, span)) = rp_state.fallbacks.get_mut(&candidate_hash) { + if let Some(index) = attesting.backing.pop() { + attesting.from_validator = index; + // Ok, another try: + let c_span = span.as_ref().map(|s| s.child("try")); + let attesting = attesting.clone(); + + // TODO [now]: kick off validation work is unimplemented + // rp_state.kick_off_validation_work(ctx, attesting, c_span).await? + } + } else { + gum::warn!( + target: LOG_TARGET, + "AttestNoPoV was triggered without fallback being available." + ); + debug_assert!(false); + } + } + } + } + None => { + // simple race condition; can be ignored = this relay-parent + // is no longer relevant. 
+ } + } + + Ok(()) +} + struct JobAndSpan { job: CandidateBackingJob, span: PerLeafSpan, @@ -1104,84 +1186,6 @@ struct ValidatorIndexOutOfBounds; #[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] impl CandidateBackingJob { - async fn handle_validated_candidate_command( - &mut self, - root_span: &jaeger::Span, - ctx: &mut Context, - command: ValidatedCandidateCommand, - ) -> Result<(), Error> { - let candidate_hash = command.candidate_hash(); - self.awaiting_validation.remove(&candidate_hash); - - match command { - ValidatedCandidateCommand::Second(res) => { - match res { - Ok((candidate, commitments, _)) => { - // sanity check. - if self.seconded.is_none() && - !self.issued_statements.contains(&candidate_hash) - { - self.seconded = Some(candidate_hash); - self.issued_statements.insert(candidate_hash); - self.metrics.on_candidate_seconded(); - - let statement = Statement::Seconded(CommittedCandidateReceipt { - descriptor: candidate.descriptor.clone(), - commitments, - }); - if let Some(stmt) = self - .sign_import_and_distribute_statement(ctx, statement, root_span) - .await? - { - ctx.send_message(CollatorProtocolMessage::Seconded( - self.parent, - stmt, - )) - .await; - } - } - }, - Err(candidate) => { - ctx.send_message(CollatorProtocolMessage::Invalid(self.parent, candidate)) - .await; - }, - } - }, - ValidatedCandidateCommand::Attest(res) => { - // We are done - avoid new validation spawns: - self.fallbacks.remove(&candidate_hash); - // sanity check. 
- if !self.issued_statements.contains(&candidate_hash) { - if res.is_ok() { - let statement = Statement::Valid(candidate_hash); - self.sign_import_and_distribute_statement(ctx, statement, &root_span) - .await?; - } - self.issued_statements.insert(candidate_hash); - } - }, - ValidatedCandidateCommand::AttestNoPoV(candidate_hash) => { - if let Some((attesting, span)) = self.fallbacks.get_mut(&candidate_hash) { - if let Some(index) = attesting.backing.pop() { - attesting.from_validator = index; - // Ok, another try: - let c_span = span.as_ref().map(|s| s.child("try")); - let attesting = attesting.clone(); - self.kick_off_validation_work(ctx, attesting, c_span).await? - } - } else { - gum::warn!( - target: LOG_TARGET, - "AttestNoPoV was triggered without fallback being available." - ); - debug_assert!(false); - } - }, - } - - Ok(()) - } - async fn background_validate_and_make_available( &mut self, ctx: &mut Context, From 967156ed9fca047c3d2fa34d60a25fac1758c644 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 27 May 2022 17:20:34 -0500 Subject: [PATCH 16/54] mostly extract statement import from job logic --- node/core/backing/src/lib.rs | 283 ++++++++++++++++++++++++++++------- 1 file changed, 227 insertions(+), 56 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 7284a717be60..49456211e316 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -704,8 +704,7 @@ async fn handle_validated_candidate_command( // outdated - we now allow seconding multiple candidates // per relay-parent. update it to properly defend against // seconding stuff wrongly. - if !rp_state.issued_statements.contains(&candidate_hash) - { + if !rp_state.issued_statements.contains(&candidate_hash) { // TODO [now]: note the candidate as seconded. 
rp_state.issued_statements.insert(candidate_hash); metrics.on_candidate_seconded(); @@ -715,26 +714,29 @@ async fn handle_validated_candidate_command( commitments, }); - - // TODO [now]: - // implement `sign_import_and_distribute_statement` - // for PerRelayParentState. - // - // if let Some(stmt) = self - // .sign_import_and_distribute_statement(ctx, statement, root_span) - // .await? - // { - // ctx.send_message(CollatorProtocolMessage::Seconded( - // rp_state.parent, - // stmt, - // )) - // .await; - // } + if let Some(stmt) = sign_import_and_distribute_statement( + ctx, + rp_state, + statement, + state.keystore.clone(), + metrics, + ) + .await? + { + ctx.send_message(CollatorProtocolMessage::Seconded( + rp_state.parent, + stmt, + )) + .await; + } } }, Err(candidate) => { - ctx.send_message(CollatorProtocolMessage::Invalid(rp_state.parent, candidate)) - .await; + ctx.send_message(CollatorProtocolMessage::Invalid( + rp_state.parent, + candidate, + )) + .await; }, }, ValidatedCandidateCommand::Attest(res) => { @@ -745,13 +747,18 @@ async fn handle_validated_candidate_command( if res.is_ok() { let statement = Statement::Valid(candidate_hash); - // TODO [now]: needs implementation of `sign_import_and_distribute` - // self.sign_import_and_distribute_statement(ctx, statement, &root_span) - // .await?; + sign_import_and_distribute_statement( + ctx, + rp_state, + statement, + state.keystore.clone(), + metrics, + ) + .await?; } rp_state.issued_statements.insert(candidate_hash); } - } + }, ValidatedCandidateCommand::AttestNoPoV(candidate_hash) => { if let Some((attesting, span)) = rp_state.fallbacks.get_mut(&candidate_hash) { if let Some(index) = attesting.backing.pop() { @@ -770,18 +777,212 @@ async fn handle_validated_candidate_command( ); debug_assert!(false); } - } + }, } - } + }, None => { // simple race condition; can be ignored = this relay-parent // is no longer relevant. 
- } + }, + } + + Ok(()) +} + +async fn sign_statement( + rp_state: &PerRelayParentState, + statement: Statement, + keystore: SyncCryptoStorePtr, + metrics: &Metrics, +) -> Option { + let signed = rp_state + .table_context + .validator + .as_ref()? + .sign(keystore, statement) + .await + .ok() + .flatten()?; + metrics.on_statement_signed(); + Some(signed) +} + +/// The dispute coordinator keeps track of all statements by validators about every recent +/// candidate. +/// +/// When importing a statement, this should be called access the candidate receipt either +/// from the statement itself or from the underlying statement table in order to craft +/// and dispatch the notification to the dispute coordinator. +/// +/// This also does bounds-checking on the validator index and will return an error if the +/// validator index is out of bounds for the current validator set. It's expected that +/// this should never happen due to the interface of the candidate backing subsystem - +/// the networking component responsible for feeding statements to the backing subsystem +/// is meant to check the signature and provenance of all statements before submission. +#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] +async fn dispatch_new_statement_to_dispute_coordinator( + ctx: &mut Context, + rp_state: &PerRelayParentState, + candidate_hash: CandidateHash, + statement: &SignedFullStatement, +) -> Result<(), ValidatorIndexOutOfBounds> { + // Dispatch the statement to the dispute coordinator. 
+ let validator_index = statement.validator_index(); + let signing_context = + SigningContext { parent_hash: rp_state.parent, session_index: rp_state.session_index }; + + let validator_public = match rp_state.table_context.validators.get(validator_index.0 as usize) { + None => return Err(ValidatorIndexOutOfBounds), + Some(v) => v, + }; + + let maybe_candidate_receipt = match statement.payload() { + Statement::Seconded(receipt) => Some(receipt.to_plain()), + Statement::Valid(candidate_hash) => { + // Valid statements are only supposed to be imported + // once we've seen at least one `Seconded` statement. + rp_state.table.get_candidate(&candidate_hash).map(|c| c.to_plain()) + }, + }; + + let maybe_signed_dispute_statement = SignedDisputeStatement::from_backing_statement( + statement.as_unchecked(), + signing_context, + validator_public.clone(), + ) + .ok(); + + if let (Some(candidate_receipt), Some(dispute_statement)) = + (maybe_candidate_receipt, maybe_signed_dispute_statement) + { + ctx.send_message(DisputeCoordinatorMessage::ImportStatements { + candidate_hash, + candidate_receipt, + session: rp_state.session_index, + statements: vec![(dispute_statement, validator_index)], + pending_confirmation: None, + }) + .await; } Ok(()) } +/// Import a statement into the statement table and return the summary of the import. 
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] +async fn import_statement( + ctx: &mut Context, + rp_state: &mut PerRelayParentState, + statement: &SignedFullStatement, +) -> Result, Error> { + gum::debug!( + target: LOG_TARGET, + statement = ?statement.payload().to_compact(), + validator_index = statement.validator_index().0, + "Importing statement", + ); + + let candidate_hash = statement.payload().candidate_hash(); + + if let Err(ValidatorIndexOutOfBounds) = + dispatch_new_statement_to_dispute_coordinator(ctx, rp_state, candidate_hash, &statement) + .await + { + gum::warn!( + target: LOG_TARGET, + session_index = ?rp_state.session_index, + relay_parent = ?rp_state.parent, + validator_index = statement.validator_index().0, + "Supposedly 'Signed' statement has validator index out of bounds." + ); + + return Ok(None) + } + + let stmt = primitive_statement_to_table(statement); + + // TODO [now]: we violate the pre-existing checks that each validator may + // only second one candidate. + // + // We will need to address this so we don't get errors incorrectly. + let summary = rp_state.table.import_statement(&rp_state.table_context, stmt); + + if let Some(attested) = summary + .as_ref() + .and_then(|s| rp_state.table.attested_candidate(&s.candidate, &rp_state.table_context)) + { + let candidate_hash = attested.candidate.hash(); + // `HashSet::insert` returns true if the thing wasn't in there already. + if rp_state.backed.insert(candidate_hash) { + if let Some(backed) = table_attested_to_backed(attested, &rp_state.table_context) { + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?candidate_hash, + relay_parent = ?rp_state.parent, + para_id = %backed.candidate.descriptor.para_id, + "Candidate backed", + ); + + // The provisioner waits on candidate-backing, which means + // that we need to send unbounded messages to avoid cycles. 
+ // + // Backed candidates are bounded by the number of validators, + // parachains, and the block production rate of the relay chain. + let message = ProvisionerMessage::ProvisionableData( + rp_state.parent, + ProvisionableData::BackedCandidate(backed.receipt()), + ); + ctx.send_unbounded_message(message); + } + } + } + + issue_new_misbehaviors(ctx, rp_state.parent, &mut rp_state.table); + + Ok(summary) +} + +/// Check if there have happened any new misbehaviors and issue necessary messages. +#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] +fn issue_new_misbehaviors( + ctx: &mut Context, + relay_parent: Hash, + table: &mut Table, +) { + // collect the misbehaviors to avoid double mutable self borrow issues + let misbehaviors: Vec<_> = table.drain_misbehaviors().collect(); + for (validator_id, report) in misbehaviors { + // The provisioner waits on candidate-backing, which means + // that we need to send unbounded messages to avoid cycles. + // + // Misbehaviors are bounded by the number of validators and + // the block production protocol. 
+ ctx.send_unbounded_message(ProvisionerMessage::ProvisionableData( + relay_parent, + ProvisionableData::MisbehaviorReport(relay_parent, validator_id, report), + )); + } +} + +#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] +async fn sign_import_and_distribute_statement( + ctx: &mut Context, + rp_state: &mut PerRelayParentState, + statement: Statement, + keystore: SyncCryptoStorePtr, + metrics: &Metrics, +) -> Result, Error> { + if let Some(signed_statement) = sign_statement(&*rp_state, statement, keystore, metrics).await { + import_statement(ctx, rp_state, &signed_statement).await?; + let smsg = StatementDistributionMessage::Share(rp_state.parent, signed_statement.clone()); + ctx.send_unbounded_message(smsg); + + Ok(Some(signed_statement)) + } else { + Ok(None) + } +} + struct JobAndSpan { job: CandidateBackingJob, span: PerLeafSpan, @@ -1277,23 +1478,6 @@ impl CandidateBackingJob { Ok(()) } - async fn sign_import_and_distribute_statement( - &mut self, - ctx: &mut Context, - statement: Statement, - root_span: &jaeger::Span, - ) -> Result, Error> { - if let Some(signed_statement) = self.sign_statement(statement).await { - self.import_statement(ctx, &signed_statement, root_span).await?; - let smsg = StatementDistributionMessage::Share(self.parent, signed_statement.clone()); - ctx.send_unbounded_message(smsg); - - Ok(Some(signed_statement)) - } else { - Ok(None) - } - } - /// Check if there have happened any new misbehaviors and issue necessary messages. fn issue_new_misbehaviors(&mut self, sender: &mut impl overseer::CandidateBackingSenderTrait) { // collect the misbehaviors to avoid double mutable self borrow issues @@ -1664,19 +1848,6 @@ impl CandidateBackingJob { Ok(()) } - async fn sign_statement(&mut self, statement: Statement) -> Option { - let signed = self - .table_context - .validator - .as_ref()? 
- .sign(self.keystore.clone(), statement) - .await - .ok() - .flatten()?; - self.metrics.on_statement_signed(); - Some(signed) - } - /// Insert or get the unbacked-span for the given candidate hash. fn insert_or_get_unbacked_span( &mut self, From b25acb7a5031864fd80aeb88b4783771568f0a07 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 27 May 2022 17:47:59 -0500 Subject: [PATCH 17/54] handle statement imports outside of job logic --- node/core/backing/src/lib.rs | 439 +++++++++++++++-------------------- 1 file changed, 188 insertions(+), 251 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 49456211e316..38dc56399a2d 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -217,7 +217,7 @@ struct PerRelayParentState { /// These candidates are undergoing validation in the background. awaiting_validation: HashSet, /// Data needed for retrying in case of `ValidatedCandidateCommand::AttestNoPoV`. - fallbacks: HashMap)>, + fallbacks: HashMap, } struct PerCandidateState { @@ -760,15 +760,18 @@ async fn handle_validated_candidate_command( } }, ValidatedCandidateCommand::AttestNoPoV(candidate_hash) => { - if let Some((attesting, span)) = rp_state.fallbacks.get_mut(&candidate_hash) { + if let Some(attesting) = rp_state.fallbacks.get_mut(&candidate_hash) { if let Some(index) = attesting.backing.pop() { attesting.from_validator = index; - // Ok, another try: - let c_span = span.as_ref().map(|s| s.child("try")); let attesting = attesting.clone(); - // TODO [now]: kick off validation work is unimplemented - // rp_state.kick_off_validation_work(ctx, attesting, c_span).await? 
+ kick_off_validation_work( + ctx, + rp_state, + &state.background_validation_tx, + attesting, + ) + .await?; } } else { gum::warn!( @@ -983,6 +986,183 @@ async fn sign_import_and_distribute_statement( } } +#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] +async fn background_validate_and_make_available( + ctx: &mut Context, + rp_state: &mut PerRelayParentState, + params: BackgroundValidationParams< + impl overseer::CandidateBackingSenderTrait, + impl Fn(BackgroundValidationResult) -> ValidatedCandidateCommand + Send + 'static + Sync, + >, +) -> Result<(), Error> { + let candidate_hash = params.candidate.hash(); + if rp_state.awaiting_validation.insert(candidate_hash) { + // spawn background task. + let bg = async move { + if let Err(e) = validate_and_make_available(params).await { + if let Error::BackgroundValidationMpsc(error) = e { + gum::debug!( + target: LOG_TARGET, + ?error, + "Mpsc background validation mpsc died during validation- leaf no longer active?" + ); + } else { + gum::error!( + target: LOG_TARGET, + "Failed to validate and make available: {:?}", + e + ); + } + } + }; + + ctx.spawn("backing-validation", bg.boxed()) + .map_err(|_| Error::FailedToSpawnBackgroundTask)?; + } + + Ok(()) +} + +/// Kick off validation work and distribute the result as a signed statement. 
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] +async fn kick_off_validation_work( + ctx: &mut Context, + rp_state: &mut PerRelayParentState, + background_validation_tx: &mpsc::Sender<(Hash, ValidatedCandidateCommand)>, + attesting: AttestingData, +) -> Result<(), Error> { + let candidate_hash = attesting.candidate.hash(); + if rp_state.issued_statements.contains(&candidate_hash) { + return Ok(()) + } + + let descriptor = attesting.candidate.descriptor().clone(); + + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?candidate_hash, + candidate_receipt = ?attesting.candidate, + "Kicking off validation", + ); + + let bg_sender = ctx.sender().clone(); + let pov = PoVData::FetchFromValidator { + from_validator: attesting.from_validator, + candidate_hash, + pov_hash: attesting.pov_hash, + }; + + // TODO [now]: as we refactor validation to always take + // exhaustive parameters, this will need to change. + // + // Also, we will probably need to account for depth here, maybe. + background_validate_and_make_available( + ctx, + rp_state, + BackgroundValidationParams { + sender: bg_sender, + tx_command: background_validation_tx.clone(), + candidate: attesting.candidate, + relay_parent: rp_state.parent, + pov, + n_validators: rp_state.table_context.validators.len(), + span: None, + make_command: ValidatedCandidateCommand::Attest, + }, + ) + .await +} + +/// Import the statement and kick off validation work if it is a part of our assignment. 
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] +async fn maybe_validate_and_import( + ctx: &mut Context, + state: &mut State, + relay_parent: Hash, + statement: SignedFullStatement, + metrics: &Metrics, +) -> Result<(), Error> { + let rp_state = match state.per_relay_parent.get_mut(&relay_parent) { + Some(r) => r, + None => { + gum::trace!( + target: LOG_TARGET, + ?relay_parent, + "Received statement for unknown relay-parent" + ); + + return Ok(()) + }, + }; + + if let Some(summary) = import_statement(ctx, rp_state, &statement).await? { + // TODO [now]: if this is a new candidate, we need to create + // an entry in the `PerCandidateState`. + + if Some(summary.group_id) != rp_state.assignment { + return Ok(()) + } + let attesting = match statement.payload() { + Statement::Seconded(receipt) => { + let candidate_hash = summary.candidate; + + let attesting = AttestingData { + candidate: rp_state + .table + .get_candidate(&candidate_hash) + .ok_or(Error::CandidateNotFound)? 
+ .to_plain(), + pov_hash: receipt.descriptor.pov_hash, + from_validator: statement.validator_index(), + backing: Vec::new(), + }; + rp_state.fallbacks.insert(summary.candidate, attesting.clone()); + attesting + }, + Statement::Valid(candidate_hash) => { + if let Some(attesting) = rp_state.fallbacks.get_mut(candidate_hash) { + let our_index = rp_state.table_context.validator.as_ref().map(|v| v.index()); + if our_index == Some(statement.validator_index()) { + return Ok(()) + } + + if rp_state.awaiting_validation.contains(candidate_hash) { + // Job already running: + attesting.backing.push(statement.validator_index()); + return Ok(()) + } else { + // No job, so start another with current validator: + attesting.from_validator = statement.validator_index(); + attesting.clone() + } + } else { + return Ok(()) + } + }, + }; + + kick_off_validation_work(ctx, rp_state, &state.background_validation_tx, attesting).await?; + } + Ok(()) +} + +#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] +async fn handle_statement_message( + ctx: &mut Context, + state: &mut State, + relay_parent: Hash, + statement: SignedFullStatement, + metrics: &Metrics, +) -> Result<(), Error> { + let _timer = metrics.time_process_statement(); + + match maybe_validate_and_import(ctx, state, relay_parent, statement, metrics).await { + Err(Error::ValidationFailed(_)) => Ok(()), + Err(e) => Err(e), + Ok(()) => Ok(()), + } +} + struct JobAndSpan { job: CandidateBackingJob, span: PerLeafSpan, @@ -1478,177 +1658,6 @@ impl CandidateBackingJob { Ok(()) } - /// Check if there have happened any new misbehaviors and issue necessary messages. 
- fn issue_new_misbehaviors(&mut self, sender: &mut impl overseer::CandidateBackingSenderTrait) { - // collect the misbehaviors to avoid double mutable self borrow issues - let misbehaviors: Vec<_> = self.table.drain_misbehaviors().collect(); - for (validator_id, report) in misbehaviors { - // The provisioner waits on candidate-backing, which means - // that we need to send unbounded messages to avoid cycles. - // - // Misbehaviors are bounded by the number of validators and - // the block production protocol. - sender.send_unbounded_message(ProvisionerMessage::ProvisionableData( - self.parent, - ProvisionableData::MisbehaviorReport(self.parent, validator_id, report), - )); - } - } - - /// Import a statement into the statement table and return the summary of the import. - async fn import_statement( - &mut self, - ctx: &mut Context, - statement: &SignedFullStatement, - root_span: &jaeger::Span, - ) -> Result, Error> { - gum::debug!( - target: LOG_TARGET, - statement = ?statement.payload().to_compact(), - validator_index = statement.validator_index().0, - "Importing statement", - ); - - let candidate_hash = statement.payload().candidate_hash(); - let import_statement_span = { - // create a span only for candidates we're already aware of. - self.get_unbacked_statement_child( - root_span, - candidate_hash, - statement.validator_index(), - ) - }; - - if let Err(ValidatorIndexOutOfBounds) = self - .dispatch_new_statement_to_dispute_coordinator(ctx.sender(), candidate_hash, &statement) - .await - { - gum::warn!( - target: LOG_TARGET, - session_index = ?self.session_index, - relay_parent = ?self.parent, - validator_index = statement.validator_index().0, - "Supposedly 'Signed' statement has validator index out of bounds." 
- ); - - return Ok(None) - } - - let stmt = primitive_statement_to_table(statement); - - let summary = self.table.import_statement(&self.table_context, stmt); - - let unbacked_span = if let Some(attested) = summary - .as_ref() - .and_then(|s| self.table.attested_candidate(&s.candidate, &self.table_context)) - { - let candidate_hash = attested.candidate.hash(); - // `HashSet::insert` returns true if the thing wasn't in there already. - if self.backed.insert(candidate_hash) { - let span = self.remove_unbacked_span(&candidate_hash); - - if let Some(backed) = table_attested_to_backed(attested, &self.table_context) { - gum::debug!( - target: LOG_TARGET, - candidate_hash = ?candidate_hash, - relay_parent = ?self.parent, - para_id = %backed.candidate.descriptor.para_id, - "Candidate backed", - ); - - // The provisioner waits on candidate-backing, which means - // that we need to send unbounded messages to avoid cycles. - // - // Backed candidates are bounded by the number of validators, - // parachains, and the block production rate of the relay chain. - let message = ProvisionerMessage::ProvisionableData( - self.parent, - ProvisionableData::BackedCandidate(backed.receipt()), - ); - ctx.send_unbounded_message(message); - - span.as_ref().map(|s| s.child("backed")); - span - } else { - None - } - } else { - None - } - } else { - None - }; - - self.issue_new_misbehaviors(ctx.sender()); - - // It is important that the child span is dropped before its parent span (`unbacked_span`) - drop(import_statement_span); - drop(unbacked_span); - - Ok(summary) - } - - /// The dispute coordinator keeps track of all statements by validators about every recent - /// candidate. - /// - /// When importing a statement, this should be called access the candidate receipt either - /// from the statement itself or from the underlying statement table in order to craft - /// and dispatch the notification to the dispute coordinator. 
- /// - /// This also does bounds-checking on the validator index and will return an error if the - /// validator index is out of bounds for the current validator set. It's expected that - /// this should never happen due to the interface of the candidate backing subsystem - - /// the networking component responsible for feeding statements to the backing subsystem - /// is meant to check the signature and provenance of all statements before submission. - async fn dispatch_new_statement_to_dispute_coordinator( - &self, - sender: &mut impl overseer::CandidateBackingSenderTrait, - candidate_hash: CandidateHash, - statement: &SignedFullStatement, - ) -> Result<(), ValidatorIndexOutOfBounds> { - // Dispatch the statement to the dispute coordinator. - let validator_index = statement.validator_index(); - let signing_context = - SigningContext { parent_hash: self.parent, session_index: self.session_index }; - - let validator_public = match self.table_context.validators.get(validator_index.0 as usize) { - None => return Err(ValidatorIndexOutOfBounds), - Some(v) => v, - }; - - let maybe_candidate_receipt = match statement.payload() { - Statement::Seconded(receipt) => Some(receipt.to_plain()), - Statement::Valid(candidate_hash) => { - // Valid statements are only supposed to be imported - // once we've seen at least one `Seconded` statement. 
- self.table.get_candidate(&candidate_hash).map(|c| c.to_plain()) - }, - }; - - let maybe_signed_dispute_statement = SignedDisputeStatement::from_backing_statement( - statement.as_unchecked(), - signing_context, - validator_public.clone(), - ) - .ok(); - - if let (Some(candidate_receipt), Some(dispute_statement)) = - (maybe_candidate_receipt, maybe_signed_dispute_statement) - { - sender - .send_message(DisputeCoordinatorMessage::ImportStatements { - candidate_hash, - candidate_receipt, - session: self.session_index, - statements: vec![(dispute_statement, validator_index)], - pending_confirmation: None, - }) - .await; - } - - Ok(()) - } - async fn handle_second_msg( &mut self, root_span: &jaeger::Span, @@ -1699,18 +1708,8 @@ impl CandidateBackingJob { ctx: &mut Context, statement: SignedFullStatement, ) -> Result<(), Error> { - let _timer = self.metrics.time_process_statement(); - let _span = root_span - .child("statement") - .with_stage(jaeger::Stage::CandidateBacking) - .with_candidate(statement.payload().candidate_hash()) - .with_relay_parent(self.parent); - - match self.maybe_validate_and_import(&root_span, ctx, statement).await { - Err(Error::ValidationFailed(_)) => Ok(()), - Err(e) => Err(e), - Ok(()) => Ok(()), - } + // function pending removal. + unimplemented!() } fn handle_get_backed_candidates_message( @@ -1786,68 +1785,6 @@ impl CandidateBackingJob { .await } - /// Import the statement and kick off validation work if it is a part of our assignment. - async fn maybe_validate_and_import( - &mut self, - root_span: &jaeger::Span, - ctx: &mut Context, - statement: SignedFullStatement, - ) -> Result<(), Error> { - if let Some(summary) = self.import_statement(ctx, &statement, root_span).await? 
{ - if Some(summary.group_id) != self.assignment { - return Ok(()) - } - let (attesting, span) = match statement.payload() { - Statement::Seconded(receipt) => { - let candidate_hash = summary.candidate; - - let span = self.get_unbacked_validation_child( - root_span, - summary.candidate, - summary.group_id, - ); - - let attesting = AttestingData { - candidate: self - .table - .get_candidate(&candidate_hash) - .ok_or(Error::CandidateNotFound)? - .to_plain(), - pov_hash: receipt.descriptor.pov_hash, - from_validator: statement.validator_index(), - backing: Vec::new(), - }; - let child = span.as_ref().map(|s| s.child("try")); - self.fallbacks.insert(summary.candidate, (attesting.clone(), span)); - (attesting, child) - }, - Statement::Valid(candidate_hash) => { - if let Some((attesting, span)) = self.fallbacks.get_mut(candidate_hash) { - let our_index = self.table_context.validator.as_ref().map(|v| v.index()); - if our_index == Some(statement.validator_index()) { - return Ok(()) - } - - if self.awaiting_validation.contains(candidate_hash) { - // Job already running: - attesting.backing.push(statement.validator_index()); - return Ok(()) - } else { - // No job, so start another with current validator: - attesting.from_validator = statement.validator_index(); - (attesting.clone(), span.as_ref().map(|s| s.child("try"))) - } - } else { - return Ok(()) - } - }, - }; - - self.kick_off_validation_work(ctx, attesting, span).await?; - } - Ok(()) - } - /// Insert or get the unbacked-span for the given candidate hash. 
fn insert_or_get_unbacked_span( &mut self, From df95962598cab58297d88248f053e653a2c45d78 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 27 May 2022 18:37:37 -0500 Subject: [PATCH 18/54] do some TODO planning for prospective parachains integration --- node/core/backing/src/lib.rs | 154 +++++++++++++++++++++++++++++++++-- 1 file changed, 147 insertions(+), 7 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 38dc56399a2d..515f436f5f3c 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -200,6 +200,7 @@ where } struct PerRelayParentState { + // TODO [now]: add a `ProspectiveParachainsMode` to the leaf. /// The hash of the relay parent on top of which this job is doing it's work. parent: Hash, /// The session index this corresponds to. @@ -267,6 +268,19 @@ struct State { per_leaf: HashMap, /// State tracked for all relay-parents backing work is ongoing for. This includes /// all active leaves. + /// + /// relay-parents fall into one of 3 categories. + /// 1. active leaves which do support prospective parachains + /// 2. active leaves which do not support prospective parachains + /// 3. relay-chain blocks which are ancestors of an active leaf and + /// do support prospective parachains. + /// + /// Relay-chain blocks which don't support prospective parachains are + /// never included in the fragment trees of active leaves which do. + /// + /// While it would be technically possible to support such leaves in + /// fragment trees, it only benefits the transition period when asynchronous + /// backing is being enabled and complicates code complexity. per_relay_parent: HashMap, /// State tracked for all candidates relevant to the implicit view. per_candidate: HashMap, @@ -704,16 +718,20 @@ async fn handle_validated_candidate_command( // outdated - we now allow seconding multiple candidates // per relay-parent. update it to properly defend against // seconding stuff wrongly. 
+ // + // The way we'll do this is by asking the prospective parachains + // subsystem about the hypothetical depth of the candidate at all + // active leaves and then ensuring we've not seconded anything with + // those depths at any of our active leaves. if !rp_state.issued_statements.contains(&candidate_hash) { - // TODO [now]: note the candidate as seconded. - rp_state.issued_statements.insert(candidate_hash); - metrics.on_candidate_seconded(); - let statement = Statement::Seconded(CommittedCandidateReceipt { descriptor: candidate.descriptor.clone(), commitments, }); + // TODO [now]: if we get an Error::RejectedByProspectiveParachains, + // then the statement has not been distributed. In this case, + // we should expunge the candidate from the rp_state, if let Some(stmt) = sign_import_and_distribute_statement( ctx, rp_state, @@ -723,6 +741,11 @@ async fn handle_validated_candidate_command( ) .await? { + // TODO [now]: note the candidate as seconded in the + // per-candidate state. + rp_state.issued_statements.insert(candidate_hash); + + metrics.on_candidate_seconded(); ctx.send_message(CollatorProtocolMessage::Seconded( rp_state.parent, stmt, @@ -914,6 +937,17 @@ async fn import_statement( .as_ref() .and_then(|s| rp_state.table.attested_candidate(&s.candidate, &rp_state.table_context)) { + // TODO [now] + // + // If this is a new candidate, we need to create an entry in the + // `PerCandidateState` map. + // + // If the relay parent supports prospective parachains, we also need + // to inform the prospective parachains subsystem of the seconded candidate + // If `ProspectiveParachainsMessage::Second` fails, then we expunge the + // statement from the table and return an error, which should be handled + // to avoid distribution of the statement. + let candidate_hash = attested.candidate.hash(); // `HashSet::insert` returns true if the thing wasn't in there already. 
if rp_state.backed.insert(candidate_hash) { @@ -926,6 +960,9 @@ async fn import_statement( "Candidate backed", ); + // TODO [now]: inform the prospective parachains subsystem + // that the candidate is now backed. + // The provisioner waits on candidate-backing, which means // that we need to send unbounded messages to avoid cycles. // @@ -977,6 +1014,10 @@ async fn sign_import_and_distribute_statement( ) -> Result, Error> { if let Some(signed_statement) = sign_statement(&*rp_state, statement, keystore, metrics).await { import_statement(ctx, rp_state, &signed_statement).await?; + + // TODO [now]: if we get an Error::RejectedByProspectiveParachains, + // we _do not_ distribute - it has been expunged. + // Propagate the error onwards. let smsg = StatementDistributionMessage::Share(rp_state.parent, signed_statement.clone()); ctx.send_unbounded_message(smsg); @@ -1095,9 +1136,12 @@ async fn maybe_validate_and_import( }, }; + // TODO [now]: if we get an Error::RejectedByProspectiveParachains, + // we will do nothing. if let Some(summary) = import_statement(ctx, rp_state, &statement).await? { - // TODO [now]: if this is a new candidate, we need to create - // an entry in the `PerCandidateState`. + // import_statement already takes care of communicating with the + // prospective parachains subsystem. At this point, the candidate + // has already been accepted into the fragment trees. if Some(summary.group_id) != rp_state.assignment { return Ok(()) @@ -1163,6 +1207,102 @@ async fn handle_statement_message( } } +/// Kick off background validation with intent to second. 
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] +async fn validate_and_second( + ctx: &mut Context, + rp_state: &mut PerRelayParentState, + candidate: &CandidateReceipt, + pov: Arc, + background_validation_tx: &mpsc::Sender<(Hash, ValidatedCandidateCommand)>, +) -> Result<(), Error> { + let candidate_hash = candidate.hash(); + + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?candidate_hash, + candidate_receipt = ?candidate, + "Validate and second candidate", + ); + + let bg_sender = ctx.sender().clone(); + background_validate_and_make_available( + ctx, + rp_state, + BackgroundValidationParams { + sender: bg_sender, + tx_command: background_validation_tx.clone(), + candidate: candidate.clone(), + relay_parent: rp_state.parent, + pov: PoVData::Ready(pov), + n_validators: rp_state.table_context.validators.len(), + span: None, + make_command: ValidatedCandidateCommand::Second, + }, + ) + .await?; + + Ok(()) +} + +#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] +async fn handle_second_msg( + ctx: &mut Context, + state: &mut State, + candidate: CandidateReceipt, + pov: PoV, + metrics: &Metrics, +) -> Result<(), Error> { + let _timer = metrics.time_process_second(); + + let candidate_hash = candidate.hash(); + let relay_parent = candidate.descriptor().relay_parent; + + let rp_state = match state.per_relay_parent.get_mut(&relay_parent) { + None => { + gum::trace!( + target: LOG_TARGET, + ?relay_parent, + ?candidate_hash, + "We were asked to second a candidate outside of our view." + ); + + return Ok(()) + } + Some(r) => r, + }; + + // Sanity check that candidate is from our assignment. 
+ if Some(candidate.descriptor().para_id) != rp_state.assignment { + gum::debug!( + target: LOG_TARGET, + our_assignment = ?rp_state.assignment, + collation = ?candidate.descriptor().para_id, + "Subsystem asked to second for para outside of our assignment", + ); + + return Ok(()) + } + + // If the message is a `CandidateBackingMessage::Second`, sign and dispatch a + // Seconded statement only if we have not seconded any other candidate and + // have not signed a Valid statement for the requested candidate. + // + // TODO [now]: this check is outdated. we need to only second when we have seconded + // nothing else with the hypothetical depth of the candidate in all our active leaves. + + // if self.seconded.is_none() { + // // This job has not seconded a candidate yet. + + // if !self.issued_statements.contains(&candidate_hash) { + // let pov = Arc::new(pov); + // self.validate_and_second(&span, &root_span, ctx, &candidate, pov).await?; + // } + // } + + Ok(()) +} + struct JobAndSpan { job: CandidateBackingJob, span: PerLeafSpan, @@ -1366,7 +1506,6 @@ async fn store_available_data( // // This will compute the erasure root internally and compare it to the expected erasure root. // This returns `Err()` iff there is an internal error. Otherwise, it returns either `Ok(Ok(()))` or `Ok(Err(_))`. - async fn make_pov_available( sender: &mut impl overseer::CandidateBackingSenderTrait, n_validators: usize, @@ -1429,6 +1568,7 @@ async fn request_candidate_validation( ) -> Result { let (tx, rx) = oneshot::channel(); + // TODO [now]: always do exhaustive validation. 
sender .send_message(CandidateValidationMessage::ValidateFromChainState( candidate_receipt, From 458d24da3e4bf0c51d03b51ac0783f9b0ef8b2c0 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 27 May 2022 18:46:10 -0500 Subject: [PATCH 19/54] finish rewriting backing subsystem in functional style --- node/core/backing/src/lib.rs | 2490 +++++++++++++++------------------- 1 file changed, 1095 insertions(+), 1395 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 515f436f5f3c..dac815015392 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -90,8 +90,8 @@ use polkadot_node_subsystem::{ ProspectiveParachainsMessage, ProvisionableData, ProvisionerMessage, RuntimeApiRequest, StatementDistributionMessage, }, - overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, PerLeafSpan, SpawnedSubsystem, - Stage, SubsystemError, + overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, + SubsystemError, }; use polkadot_node_subsystem_util::{ self as util, @@ -100,7 +100,7 @@ use polkadot_node_subsystem_util::{ request_validators, Validator, }; use polkadot_primitives::v2::{ - BackedCandidate, CandidateCommitments, CandidateHash, CandidateReceipt, CollatorId, + BackedCandidate, CandidateCommitments, CandidateHash, CandidateReceipt, CommittedCandidateReceipt, CoreIndex, CoreState, Hash, Id as ParaId, PersistedValidationData, SessionIndex, SigningContext, ValidatorId, ValidatorIndex, ValidatorSignature, ValidityAttestation, @@ -364,8 +364,7 @@ async fn run_iteration( FromOverseer::Signal(OverseerSignal::BlockFinalized(..)) => {} FromOverseer::Signal(OverseerSignal::Conclude) => return Ok(()), FromOverseer::Communication { msg } => { - // TODO [now] - // handle_communication(&mut *ctx, view, msg).await?, + handle_communication(&mut *ctx, state, msg, metrics).await?; } } } @@ -373,1608 +372,1309 @@ async fn run_iteration( } } -#[overseer::contextbounds(CandidateBacking, prefix = 
self::overseer)] -async fn handle_communication( - ctx: &mut Context, - view: &mut View, - message: CandidateBackingMessage, -) -> Result<(), Error> { - match message { - CandidateBackingMessage::Second(relay_parent, candidate, pov) => { - if let Some(job) = view.job_mut(&relay_parent) { - job.job.handle_second_msg(&job.span, ctx, candidate, pov).await?; - } - }, - CandidateBackingMessage::Statement(relay_parent, statement) => { - if let Some(job) = view.job_mut(&relay_parent) { - job.job.handle_statement_message(&job.span, ctx, statement).await?; - } - }, - CandidateBackingMessage::GetBackedCandidates(relay_parent, requested_candidates, tx) => - if let Some(job) = view.job_mut(&relay_parent) { - job.job.handle_get_backed_candidates_message(requested_candidates, tx)?; - }, - } - - Ok(()) +/// In case a backing validator does not provide a PoV, we need to retry with other backing +/// validators. +/// +/// This is the data needed to accomplish this. Basically all the data needed for spawning a +/// validation job and a list of backing validators, we can try. +#[derive(Clone)] +struct AttestingData { + /// The candidate to attest. + candidate: CandidateReceipt, + /// Hash of the PoV we need to fetch. + pov_hash: Hash, + /// Validator we are currently trying to get the PoV from. + from_validator: ValidatorIndex, + /// Other backing validators we can try in case `from_validator` failed. + backing: Vec, } -async fn prospective_parachains_mode( - _ctx: &mut Context, - _leaf_hash: Hash, -) -> ProspectiveParachainsMode { - // TODO [now]: this should be a runtime API version call - // cc https://github.com/paritytech/substrate/discussions/11338 - ProspectiveParachainsMode::Disabled +/// How many votes we need to consider a candidate backed. +/// +/// WARNING: This has to be kept in sync with the runtime check in the inclusion module. 
+fn minimum_votes(n_validators: usize) -> usize { + std::cmp::min(2, n_validators) } -#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] -async fn handle_active_leaves_update( - ctx: &mut Context, - update: ActiveLeavesUpdate, - state: &mut State, - metrics: &Metrics, -) -> Result<(), Error> { - enum LeafHasProspectiveParachains { - Enabled(Result, ImplicitViewFetchError>), - Disabled, - } - - // Activate in implicit view before deactivate, per the docs - // on ImplicitView, this is more efficient. - let res = if let Some(leaf) = update.activated { - // Only activate in implicit view if prospective - // parachains are enabled. - let mode = prospective_parachains_mode(ctx, leaf.hash).await; +#[derive(Default)] +struct TableContext { + validator: Option, + groups: HashMap>, + validators: Vec, +} - let leaf_hash = leaf.hash; - Some(( - leaf, - match mode { - ProspectiveParachainsMode::Disabled => LeafHasProspectiveParachains::Disabled, - ProspectiveParachainsMode::Enabled => LeafHasProspectiveParachains::Enabled( - state.implicit_view.activate_leaf(ctx.sender(), leaf_hash).await, - ), - }, - )) - } else { - None - }; +impl TableContextTrait for TableContext { + type AuthorityId = ValidatorIndex; + type Digest = CandidateHash; + type GroupId = ParaId; + type Signature = ValidatorSignature; + type Candidate = CommittedCandidateReceipt; - for deactivated in update.deactivated { - state.per_leaf.remove(&deactivated); - state.implicit_view.deactivate_leaf(deactivated); + fn candidate_digest(candidate: &CommittedCandidateReceipt) -> CandidateHash { + candidate.hash() } - // clean up `per_relay_parent` according to ancestry - // of leaves. we do this so we can clean up candidates right after - // as a result. - // - // when prospective parachains are disabled, the implicit view is empty, - // which means we'll clean up everything. This is correct. 
- for relay_parent in state.implicit_view.all_allowed_relay_parents() { - state.per_relay_parent.remove(relay_parent); + fn candidate_group(candidate: &CommittedCandidateReceipt) -> ParaId { + candidate.descriptor().para_id } - // clean up `per_candidate` according to which relay-parents - // are known. - // - // when prospective parachains are disabled, we clean up all candidates - // because we've cleaned up all relay parents. this is correct. - state - .per_candidate - .retain(|_, pc| state.per_relay_parent.contains_key(&pc.relay_parent)); - - // Get relay parents which might be fresh but might be known already - // that are explicit or implicit from the new active leaf. - let fresh_relay_parents = match res { - None => return Ok(()), - Some((leaf, LeafHasProspectiveParachains::Disabled)) => { - // defensive in this case - for enabled, this manifests as an error. - if state.per_leaf.contains_key(&leaf.hash) { - return Ok(()) - } - - state.per_leaf.insert( - leaf.hash, - ActiveLeafState { - prospective_parachains_mode: ProspectiveParachainsMode::Disabled, - // This is empty because the only allowed relay-parent and depth - // when prospective parachains are disabled is the leaf hash and 0, - // respectively. We've just learned about the leaf hash, so we cannot - // have any candidates seconded with it as a relay-parent yet. - seconded_at_depth: BTreeMap::new(), - }, - ); - - vec![leaf.hash] - }, - Some((leaf, LeafHasProspectiveParachains::Enabled(Ok(_)))) => { - let fresh_relay_parents = - state.implicit_view.known_allowed_relay_parents_under(&leaf.hash, None); + fn is_member_of(&self, authority: &ValidatorIndex, group: &ParaId) -> bool { + self.groups + .get(group) + .map_or(false, |g| g.iter().position(|a| a == authority).is_some()) + } - // At this point, all candidates outside of the implicit view - // have been cleaned up. 
For all which remain, which we've seconded, - // we ask the prospective parachains subsystem where they land in the fragment - // tree for the given active leaf. This comprises our `seconded_at_depth`. + fn requisite_votes(&self, group: &ParaId) -> usize { + self.groups.get(group).map_or(usize::MAX, |g| minimum_votes(g.len())) + } +} - let remaining_seconded = state - .per_candidate - .iter() - .filter(|(_, cd)| cd.seconded_locally) - .map(|(c_hash, cd)| (*c_hash, cd.para_id)); +struct InvalidErasureRoot; - // one-to-one correspondence to remaining_seconded - let mut membership_answers = FuturesOrdered::new(); +// It looks like it's not possible to do an `impl From` given the current state of +// the code. So this does the necessary conversion. +fn primitive_statement_to_table(s: &SignedFullStatement) -> TableSignedStatement { + let statement = match s.payload() { + Statement::Seconded(c) => TableStatement::Seconded(c.clone()), + Statement::Valid(h) => TableStatement::Valid(h.clone()), + }; - for (candidate_hash, para_id) in remaining_seconded { - let (tx, rx) = oneshot::channel(); - membership_answers.push(rx.map_ok(move |membership| (candidate_hash, membership))); + TableSignedStatement { + statement, + signature: s.signature().clone(), + sender: s.validator_index(), + } +} - ctx.send_message(ProspectiveParachainsMessage::GetTreeMembership( - para_id, - candidate_hash, - tx, - )) - .await; - } +fn table_attested_to_backed( + attested: TableAttestedCandidate< + ParaId, + CommittedCandidateReceipt, + ValidatorIndex, + ValidatorSignature, + >, + table_context: &TableContext, +) -> Option { + let TableAttestedCandidate { candidate, validity_votes, group_id: para_id } = attested; - let mut seconded_at_depth = BTreeMap::new(); - for response in membership_answers.next().await { - match response { - Err(oneshot::Canceled) => { - gum::warn!( - target: LOG_TARGET, - "Prospective parachains subsystem unreachable for membership request", - ); + let (ids, validity_votes): 
(Vec<_>, Vec) = + validity_votes.into_iter().map(|(id, vote)| (id, vote.into())).unzip(); - continue - }, - Ok((candidate_hash, membership)) => { - // This request gives membership in all fragment trees. We have some - // wasted data here, and it can be optimized if it proves - // relevant to performance. - if let Some((_, depths)) = - membership.into_iter().find(|(leaf_hash, _)| leaf_hash == &leaf.hash) - { - for depth in depths { - seconded_at_depth.insert(depth, candidate_hash); - } - } - }, - } - } + let group = table_context.groups.get(¶_id)?; - state.per_leaf.insert( - leaf.hash, - ActiveLeafState { - prospective_parachains_mode: ProspectiveParachainsMode::Enabled, - seconded_at_depth, - }, - ); + let mut validator_indices = BitVec::with_capacity(group.len()); - match fresh_relay_parents { - Some(f) => f.to_vec(), - None => { - gum::warn!( - target: LOG_TARGET, - leaf_hash = ?leaf.hash, - "Implicit view gave no relay-parents" - ); + validator_indices.resize(group.len(), false); - vec![leaf.hash] - }, - } - }, - Some((leaf, LeafHasProspectiveParachains::Enabled(Err(e)))) => { - gum::debug!( + // The order of the validity votes in the backed candidate must match + // the order of bits set in the bitfield, which is not necessarily + // the order of the `validity_votes` we got from the table. + let mut vote_positions = Vec::with_capacity(validity_votes.len()); + for (orig_idx, id) in ids.iter().enumerate() { + if let Some(position) = group.iter().position(|x| x == id) { + validator_indices.set(position, true); + vote_positions.push((orig_idx, position)); + } else { + gum::warn!( target: LOG_TARGET, - leaf_hash = ?leaf.hash, - err = ?e, - "Failed to load implicit view for leaf." + "Logic error: Validity vote from table does not correspond to group", ); - return Ok(()) - }, - }; - - // add entries in `per_relay_parent`. for all new relay-parents. 
- for maybe_new in fresh_relay_parents { - if state.per_relay_parent.contains_key(&maybe_new) { - continue + return None } + } + vote_positions.sort_by_key(|(_orig, pos_in_group)| *pos_in_group); - // construct a `PerRelayParent` from the runtime API - // and insert it. - let per = construct_per_relay_parent_state(ctx, maybe_new, &state.keystore).await?; + Some(BackedCandidate { + candidate, + validity_votes: vote_positions + .into_iter() + .map(|(pos_in_votes, _pos_in_group)| validity_votes[pos_in_votes].clone()) + .collect(), + validator_indices, + }) +} - if let Some(per) = per { - state.per_relay_parent.insert(maybe_new, per); - } - } +async fn store_available_data( + sender: &mut impl overseer::CandidateBackingSenderTrait, + n_validators: u32, + candidate_hash: CandidateHash, + available_data: AvailableData, +) -> Result<(), Error> { + let (tx, rx) = oneshot::channel(); + sender + .send_message(AvailabilityStoreMessage::StoreAvailableData { + candidate_hash, + n_validators, + available_data, + tx, + }) + .await; + + let _ = rx.await.map_err(Error::StoreAvailableData)?; Ok(()) } -/// Load the data necessary to do backing work on top of a relay-parent. -#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] -async fn construct_per_relay_parent_state( - ctx: &mut Context, - relay_parent: Hash, - keystore: &SyncCryptoStorePtr, -) -> Result, Error> { - macro_rules! try_runtime_api { - ($x: expr) => { - match $x { - Ok(x) => x, - Err(e) => { - gum::warn!( - target: LOG_TARGET, - err = ?e, - "Failed to fetch runtime API data for job", - ); +// Make a `PoV` available. +// +// This will compute the erasure root internally and compare it to the expected erasure root. +// This returns `Err()` iff there is an internal error. Otherwise, it returns either `Ok(Ok(()))` or `Ok(Err(_))`. 
+async fn make_pov_available( + sender: &mut impl overseer::CandidateBackingSenderTrait, + n_validators: usize, + pov: Arc, + candidate_hash: CandidateHash, + validation_data: PersistedValidationData, + expected_erasure_root: Hash, + span: Option<&jaeger::Span>, +) -> Result, Error> { + let available_data = AvailableData { pov, validation_data }; - // We can't do candidate validation work if we don't have the - // requisite runtime API data. But these errors should not take - // down the node. - return Ok(None); - } - } + { + let _span = span.as_ref().map(|s| s.child("erasure-coding").with_candidate(candidate_hash)); + + let chunks = erasure_coding::obtain_chunks_v1(n_validators, &available_data)?; + + let branches = erasure_coding::branches(chunks.as_ref()); + let erasure_root = branches.root(); + + if erasure_root != expected_erasure_root { + return Ok(Err(InvalidErasureRoot)) } } - let parent = relay_parent; + { + let _span = span.as_ref().map(|s| s.child("store-data").with_candidate(candidate_hash)); - let (validators, groups, session_index, cores) = futures::try_join!( - request_validators(parent, ctx.sender()).await, - request_validator_groups(parent, ctx.sender()).await, - request_session_index_for_child(parent, ctx.sender()).await, - request_from_runtime(parent, ctx.sender(), |tx| { - RuntimeApiRequest::AvailabilityCores(tx) - },) - .await, - ) - .map_err(Error::JoinMultiple)?; + store_available_data(sender, n_validators as u32, candidate_hash, available_data).await?; + } - let validators: Vec<_> = try_runtime_api!(validators); - let (validator_groups, group_rotation_info) = try_runtime_api!(groups); - let session_index = try_runtime_api!(session_index); - let cores = try_runtime_api!(cores); + Ok(Ok(())) +} - let signing_context = SigningContext { parent_hash: parent, session_index }; - let validator = - match Validator::construct(&validators, signing_context.clone(), keystore.clone()).await { - Ok(v) => Some(v), - Err(util::Error::NotAValidator) => None, 
- Err(e) => { - gum::warn!( - target: LOG_TARGET, - err = ?e, - "Cannot participate in candidate backing", - ); +async fn request_pov( + sender: &mut impl overseer::CandidateBackingSenderTrait, + relay_parent: Hash, + from_validator: ValidatorIndex, + candidate_hash: CandidateHash, + pov_hash: Hash, +) -> Result, Error> { + let (tx, rx) = oneshot::channel(); + sender + .send_message(AvailabilityDistributionMessage::FetchPoV { + relay_parent, + from_validator, + candidate_hash, + pov_hash, + tx, + }) + .await; - return Ok(None) - }, - }; + let pov = rx.await.map_err(|_| Error::FetchPoV)?; + Ok(Arc::new(pov)) +} - let mut groups = HashMap::new(); - let n_cores = cores.len(); - let mut assignment = None; +async fn request_candidate_validation( + sender: &mut impl overseer::CandidateBackingSenderTrait, + candidate_receipt: CandidateReceipt, + pov: Arc, +) -> Result { + let (tx, rx) = oneshot::channel(); - for (idx, core) in cores.into_iter().enumerate() { - // Ignore prospective assignments on occupied cores for the time being. - if let CoreState::Scheduled(scheduled) = core { - let core_index = CoreIndex(idx as _); - let group_index = group_rotation_info.group_for_core(core_index, n_cores); - if let Some(g) = validator_groups.get(group_index.0 as usize) { - if validator.as_ref().map_or(false, |v| g.contains(&v.index())) { - assignment = Some((scheduled.para_id, scheduled.collator)); - } - groups.insert(scheduled.para_id, g.clone()); - } - } - } + // TODO [now]: always do exhaustive validation. 
+ sender + .send_message(CandidateValidationMessage::ValidateFromChainState( + candidate_receipt, + pov, + BACKING_EXECUTION_TIMEOUT, + tx, + )) + .await; - let table_context = TableContext { groups, validators, validator }; + match rx.await { + Ok(Ok(validation_result)) => Ok(validation_result), + Ok(Err(err)) => Err(Error::ValidationFailed(err)), + Err(err) => Err(Error::ValidateFromChainState(err)), + } +} - // TODO [now]: I've removed the `required_collator` more broadly, - // because it's not used in practice and was intended for parathreads. - // - // We should attempt parathreads another way, I think, so it makes sense - // to remove. - let assignment = assignment.map(|(a, _required_collator)| a); +type BackgroundValidationResult = + Result<(CandidateReceipt, CandidateCommitments, Arc), CandidateReceipt>; - Ok(Some(PerRelayParentState { - parent, - session_index, - assignment, - backed: HashSet::new(), - table: Table::default(), - table_context, - issued_statements: HashSet::new(), - awaiting_validation: HashSet::new(), - fallbacks: HashMap::new(), - })) +struct BackgroundValidationParams { + sender: S, + tx_command: mpsc::Sender<(Hash, ValidatedCandidateCommand)>, + candidate: CandidateReceipt, + relay_parent: Hash, + pov: PoVData, + n_validators: usize, + span: Option, + make_command: F, } -#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] -async fn handle_validated_candidate_command( - ctx: &mut Context, - state: &mut State, - relay_parent: Hash, - command: ValidatedCandidateCommand, - metrics: &Metrics, +async fn validate_and_make_available( + params: BackgroundValidationParams< + impl overseer::CandidateBackingSenderTrait, + impl Fn(BackgroundValidationResult) -> ValidatedCandidateCommand + Sync, + >, ) -> Result<(), Error> { - match state.per_relay_parent.get_mut(&relay_parent) { - Some(rp_state) => { - let candidate_hash = command.candidate_hash(); - rp_state.awaiting_validation.remove(&candidate_hash); - - match command { - 
ValidatedCandidateCommand::Second(res) => match res { - Ok((candidate, commitments, _)) => { - // sanity check. - // TODO [now]: this sanity check is almost certainly - // outdated - we now allow seconding multiple candidates - // per relay-parent. update it to properly defend against - // seconding stuff wrongly. - // - // The way we'll do this is by asking the prospective parachains - // subsystem about the hypothetical depth of the candidate at all - // active leaves and then ensuring we've not seconded anything with - // those depths at any of our active leaves. - if !rp_state.issued_statements.contains(&candidate_hash) { - let statement = Statement::Seconded(CommittedCandidateReceipt { - descriptor: candidate.descriptor.clone(), - commitments, - }); + let BackgroundValidationParams { + mut sender, + mut tx_command, + candidate, + relay_parent, + pov, + n_validators, + span, + make_command, + } = params; - // TODO [now]: if we get an Error::RejectedByProspectiveParachains, - // then the statement has not been distributed. In this case, - // we should expunge the candidate from the rp_state, - if let Some(stmt) = sign_import_and_distribute_statement( - ctx, - rp_state, - statement, - state.keystore.clone(), - metrics, - ) - .await? - { - // TODO [now]: note the candidate as seconded in the - // per-candidate state. 
- rp_state.issued_statements.insert(candidate_hash); - - metrics.on_candidate_seconded(); - ctx.send_message(CollatorProtocolMessage::Seconded( - rp_state.parent, - stmt, - )) - .await; - } - } - }, - Err(candidate) => { - ctx.send_message(CollatorProtocolMessage::Invalid( - rp_state.parent, - candidate, + let pov = match pov { + PoVData::Ready(pov) => pov, + PoVData::FetchFromValidator { from_validator, candidate_hash, pov_hash } => { + let _span = span.as_ref().map(|s| s.child("request-pov")); + match request_pov(&mut sender, relay_parent, from_validator, candidate_hash, pov_hash) + .await + { + Err(Error::FetchPoV) => { + tx_command + .send(( + relay_parent, + ValidatedCandidateCommand::AttestNoPoV(candidate.hash()), )) - .await; - }, + .await + .map_err(Error::BackgroundValidationMpsc)?; + return Ok(()) }, - ValidatedCandidateCommand::Attest(res) => { - // We are done - avoid new validation spawns: - rp_state.fallbacks.remove(&candidate_hash); - // sanity check. - if !rp_state.issued_statements.contains(&candidate_hash) { - if res.is_ok() { - let statement = Statement::Valid(candidate_hash); + Err(err) => return Err(err), + Ok(pov) => pov, + } + }, + }; - sign_import_and_distribute_statement( - ctx, - rp_state, - statement, - state.keystore.clone(), - metrics, - ) - .await?; - } - rp_state.issued_statements.insert(candidate_hash); - } - }, - ValidatedCandidateCommand::AttestNoPoV(candidate_hash) => { - if let Some(attesting) = rp_state.fallbacks.get_mut(&candidate_hash) { - if let Some(index) = attesting.backing.pop() { - attesting.from_validator = index; - let attesting = attesting.clone(); + let v = { + let _span = span.as_ref().map(|s| { + s.child("request-validation") + .with_pov(&pov) + .with_para_id(candidate.descriptor().para_id) + }); + request_candidate_validation(&mut sender, candidate.clone(), pov.clone()).await? 
+ }; - kick_off_validation_work( - ctx, - rp_state, - &state.background_validation_tx, - attesting, - ) - .await?; - } - } else { - gum::warn!( - target: LOG_TARGET, - "AttestNoPoV was triggered without fallback being available." - ); - debug_assert!(false); - } + let res = match v { + ValidationResult::Valid(commitments, validation_data) => { + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?candidate.hash(), + "Validation successful", + ); + + let erasure_valid = make_pov_available( + &mut sender, + n_validators, + pov.clone(), + candidate.hash(), + validation_data, + candidate.descriptor.erasure_root, + span.as_ref(), + ) + .await?; + + match erasure_valid { + Ok(()) => Ok((candidate, commitments, pov.clone())), + Err(InvalidErasureRoot) => { + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?candidate.hash(), + actual_commitments = ?commitments, + "Erasure root doesn't match the announced by the candidate receipt", + ); + Err(candidate) }, } }, - None => { - // simple race condition; can be ignored = this relay-parent - // is no longer relevant. + ValidationResult::Invalid(InvalidCandidate::CommitmentsHashMismatch) => { + // If validation produces a new set of commitments, we vote the candidate as invalid. + gum::warn!( + target: LOG_TARGET, + candidate_hash = ?candidate.hash(), + "Validation yielded different commitments", + ); + Err(candidate) }, - } + ValidationResult::Invalid(reason) => { + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?candidate.hash(), + reason = ?reason, + "Validation yielded an invalid candidate", + ); + Err(candidate) + }, + }; - Ok(()) + tx_command.send((relay_parent, make_command(res))).await.map_err(Into::into) } -async fn sign_statement( - rp_state: &PerRelayParentState, - statement: Statement, - keystore: SyncCryptoStorePtr, - metrics: &Metrics, -) -> Option { - let signed = rp_state - .table_context - .validator - .as_ref()? 
- .sign(keystore, statement) - .await - .ok() - .flatten()?; - metrics.on_statement_signed(); - Some(signed) -} +struct ValidatorIndexOutOfBounds; -/// The dispute coordinator keeps track of all statements by validators about every recent -/// candidate. -/// -/// When importing a statement, this should be called access the candidate receipt either -/// from the statement itself or from the underlying statement table in order to craft -/// and dispatch the notification to the dispute coordinator. -/// -/// This also does bounds-checking on the validator index and will return an error if the -/// validator index is out of bounds for the current validator set. It's expected that -/// this should never happen due to the interface of the candidate backing subsystem - -/// the networking component responsible for feeding statements to the backing subsystem -/// is meant to check the signature and provenance of all statements before submission. #[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] -async fn dispatch_new_statement_to_dispute_coordinator( +async fn handle_communication( ctx: &mut Context, - rp_state: &PerRelayParentState, - candidate_hash: CandidateHash, - statement: &SignedFullStatement, -) -> Result<(), ValidatorIndexOutOfBounds> { - // Dispatch the statement to the dispute coordinator. - let validator_index = statement.validator_index(); - let signing_context = - SigningContext { parent_hash: rp_state.parent, session_index: rp_state.session_index }; - - let validator_public = match rp_state.table_context.validators.get(validator_index.0 as usize) { - None => return Err(ValidatorIndexOutOfBounds), - Some(v) => v, - }; - - let maybe_candidate_receipt = match statement.payload() { - Statement::Seconded(receipt) => Some(receipt.to_plain()), - Statement::Valid(candidate_hash) => { - // Valid statements are only supposed to be imported - // once we've seen at least one `Seconded` statement. 
- rp_state.table.get_candidate(&candidate_hash).map(|c| c.to_plain()) + state: &mut State, + message: CandidateBackingMessage, + metrics: &Metrics, +) -> Result<(), Error> { + match message { + CandidateBackingMessage::Second(_relay_parent, candidate, pov) => { + handle_second_message( + ctx, + state, + candidate, + pov, + metrics, + ).await?; }, - }; - - let maybe_signed_dispute_statement = SignedDisputeStatement::from_backing_statement( - statement.as_unchecked(), - signing_context, - validator_public.clone(), - ) - .ok(); - - if let (Some(candidate_receipt), Some(dispute_statement)) = - (maybe_candidate_receipt, maybe_signed_dispute_statement) - { - ctx.send_message(DisputeCoordinatorMessage::ImportStatements { - candidate_hash, - candidate_receipt, - session: rp_state.session_index, - statements: vec![(dispute_statement, validator_index)], - pending_confirmation: None, - }) - .await; + CandidateBackingMessage::Statement(relay_parent, statement) => { + handle_statement_message( + ctx, + state, + relay_parent, + statement, + metrics, + ).await?; + }, + CandidateBackingMessage::GetBackedCandidates(relay_parent, requested_candidates, tx) => + if let Some(rp_state) = state.per_relay_parent.get(&relay_parent) { + handle_get_backed_candidates_message(rp_state, requested_candidates, tx, metrics)?; + }, } Ok(()) } -/// Import a statement into the statement table and return the summary of the import. 
+async fn prospective_parachains_mode( + _ctx: &mut Context, + _leaf_hash: Hash, +) -> ProspectiveParachainsMode { + // TODO [now]: this should be a runtime API version call + // cc https://github.com/paritytech/substrate/discussions/11338 + ProspectiveParachainsMode::Disabled +} + #[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] -async fn import_statement( +async fn handle_active_leaves_update( ctx: &mut Context, - rp_state: &mut PerRelayParentState, - statement: &SignedFullStatement, -) -> Result, Error> { - gum::debug!( - target: LOG_TARGET, - statement = ?statement.payload().to_compact(), - validator_index = statement.validator_index().0, - "Importing statement", - ); - - let candidate_hash = statement.payload().candidate_hash(); + update: ActiveLeavesUpdate, + state: &mut State, + metrics: &Metrics, +) -> Result<(), Error> { + enum LeafHasProspectiveParachains { + Enabled(Result, ImplicitViewFetchError>), + Disabled, + } - if let Err(ValidatorIndexOutOfBounds) = - dispatch_new_statement_to_dispute_coordinator(ctx, rp_state, candidate_hash, &statement) - .await - { - gum::warn!( - target: LOG_TARGET, - session_index = ?rp_state.session_index, - relay_parent = ?rp_state.parent, - validator_index = statement.validator_index().0, - "Supposedly 'Signed' statement has validator index out of bounds." - ); + // Activate in implicit view before deactivate, per the docs + // on ImplicitView, this is more efficient. + let res = if let Some(leaf) = update.activated { + // Only activate in implicit view if prospective + // parachains are enabled. 
+ let mode = prospective_parachains_mode(ctx, leaf.hash).await; - return Ok(None) - } + let leaf_hash = leaf.hash; + Some(( + leaf, + match mode { + ProspectiveParachainsMode::Disabled => LeafHasProspectiveParachains::Disabled, + ProspectiveParachainsMode::Enabled => LeafHasProspectiveParachains::Enabled( + state.implicit_view.activate_leaf(ctx.sender(), leaf_hash).await, + ), + }, + )) + } else { + None + }; - let stmt = primitive_statement_to_table(statement); + for deactivated in update.deactivated { + state.per_leaf.remove(&deactivated); + state.implicit_view.deactivate_leaf(deactivated); + } - // TODO [now]: we violate the pre-existing checks that each validator may - // only second one candidate. + // clean up `per_relay_parent` according to ancestry + // of leaves. we do this so we can clean up candidates right after + // as a result. // - // We will need to address this so we don't get errors incorrectly. - let summary = rp_state.table.import_statement(&rp_state.table_context, stmt); + // when prospective parachains are disabled, the implicit view is empty, + // which means we'll clean up everything. This is correct. + for relay_parent in state.implicit_view.all_allowed_relay_parents() { + state.per_relay_parent.remove(relay_parent); + } - if let Some(attested) = summary - .as_ref() - .and_then(|s| rp_state.table.attested_candidate(&s.candidate, &rp_state.table_context)) - { - // TODO [now] - // - // If this is a new candidate, we need to create an entry in the - // `PerCandidateState` map. - // - // If the relay parent supports prospective parachains, we also need - // to inform the prospective parachains subsystem of the seconded candidate - // If `ProspectiveParachainsMessage::Second` fails, then we expunge the - // statement from the table and return an error, which should be handled - // to avoid distribution of the statement. + // clean up `per_candidate` according to which relay-parents + // are known. 
+ // + // when prospective parachains are disabled, we clean up all candidates + // because we've cleaned up all relay parents. this is correct. + state + .per_candidate + .retain(|_, pc| state.per_relay_parent.contains_key(&pc.relay_parent)); - let candidate_hash = attested.candidate.hash(); - // `HashSet::insert` returns true if the thing wasn't in there already. - if rp_state.backed.insert(candidate_hash) { - if let Some(backed) = table_attested_to_backed(attested, &rp_state.table_context) { - gum::debug!( - target: LOG_TARGET, - candidate_hash = ?candidate_hash, - relay_parent = ?rp_state.parent, - para_id = %backed.candidate.descriptor.para_id, - "Candidate backed", - ); + // Get relay parents which might be fresh but might be known already + // that are explicit or implicit from the new active leaf. + let fresh_relay_parents = match res { + None => return Ok(()), + Some((leaf, LeafHasProspectiveParachains::Disabled)) => { + // defensive in this case - for enabled, this manifests as an error. + if state.per_leaf.contains_key(&leaf.hash) { + return Ok(()) + } - // TODO [now]: inform the prospective parachains subsystem - // that the candidate is now backed. + state.per_leaf.insert( + leaf.hash, + ActiveLeafState { + prospective_parachains_mode: ProspectiveParachainsMode::Disabled, + // This is empty because the only allowed relay-parent and depth + // when prospective parachains are disabled is the leaf hash and 0, + // respectively. We've just learned about the leaf hash, so we cannot + // have any candidates seconded with it as a relay-parent yet. + seconded_at_depth: BTreeMap::new(), + }, + ); - // The provisioner waits on candidate-backing, which means - // that we need to send unbounded messages to avoid cycles. - // - // Backed candidates are bounded by the number of validators, - // parachains, and the block production rate of the relay chain. 
- let message = ProvisionerMessage::ProvisionableData( - rp_state.parent, - ProvisionableData::BackedCandidate(backed.receipt()), - ); - ctx.send_unbounded_message(message); - } - } - } + vec![leaf.hash] + }, + Some((leaf, LeafHasProspectiveParachains::Enabled(Ok(_)))) => { + let fresh_relay_parents = + state.implicit_view.known_allowed_relay_parents_under(&leaf.hash, None); - issue_new_misbehaviors(ctx, rp_state.parent, &mut rp_state.table); + // At this point, all candidates outside of the implicit view + // have been cleaned up. For all which remain, which we've seconded, + // we ask the prospective parachains subsystem where they land in the fragment + // tree for the given active leaf. This comprises our `seconded_at_depth`. - Ok(summary) -} + let remaining_seconded = state + .per_candidate + .iter() + .filter(|(_, cd)| cd.seconded_locally) + .map(|(c_hash, cd)| (*c_hash, cd.para_id)); -/// Check if there have happened any new misbehaviors and issue necessary messages. -#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] -fn issue_new_misbehaviors( - ctx: &mut Context, - relay_parent: Hash, - table: &mut Table, -) { - // collect the misbehaviors to avoid double mutable self borrow issues - let misbehaviors: Vec<_> = table.drain_misbehaviors().collect(); - for (validator_id, report) in misbehaviors { - // The provisioner waits on candidate-backing, which means - // that we need to send unbounded messages to avoid cycles. - // - // Misbehaviors are bounded by the number of validators and - // the block production protocol. 
- ctx.send_unbounded_message(ProvisionerMessage::ProvisionableData( - relay_parent, - ProvisionableData::MisbehaviorReport(relay_parent, validator_id, report), - )); - } -} + // one-to-one correspondence to remaining_seconded + let mut membership_answers = FuturesOrdered::new(); -#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] -async fn sign_import_and_distribute_statement( - ctx: &mut Context, - rp_state: &mut PerRelayParentState, - statement: Statement, - keystore: SyncCryptoStorePtr, - metrics: &Metrics, -) -> Result, Error> { - if let Some(signed_statement) = sign_statement(&*rp_state, statement, keystore, metrics).await { - import_statement(ctx, rp_state, &signed_statement).await?; + for (candidate_hash, para_id) in remaining_seconded { + let (tx, rx) = oneshot::channel(); + membership_answers.push(rx.map_ok(move |membership| (candidate_hash, membership))); - // TODO [now]: if we get an Error::RejectedByProspectiveParachains, - // we _do not_ distribute - it has been expunged. - // Propagate the error onwards. 
- let smsg = StatementDistributionMessage::Share(rp_state.parent, signed_statement.clone()); - ctx.send_unbounded_message(smsg); + ctx.send_message(ProspectiveParachainsMessage::GetTreeMembership( + para_id, + candidate_hash, + tx, + )) + .await; + } - Ok(Some(signed_statement)) - } else { - Ok(None) - } -} + let mut seconded_at_depth = BTreeMap::new(); + for response in membership_answers.next().await { + match response { + Err(oneshot::Canceled) => { + gum::warn!( + target: LOG_TARGET, + "Prospective parachains subsystem unreachable for membership request", + ); -#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] -async fn background_validate_and_make_available( - ctx: &mut Context, - rp_state: &mut PerRelayParentState, - params: BackgroundValidationParams< - impl overseer::CandidateBackingSenderTrait, - impl Fn(BackgroundValidationResult) -> ValidatedCandidateCommand + Send + 'static + Sync, - >, -) -> Result<(), Error> { - let candidate_hash = params.candidate.hash(); - if rp_state.awaiting_validation.insert(candidate_hash) { - // spawn background task. - let bg = async move { - if let Err(e) = validate_and_make_available(params).await { - if let Error::BackgroundValidationMpsc(error) = e { - gum::debug!( - target: LOG_TARGET, - ?error, - "Mpsc background validation mpsc died during validation- leaf no longer active?" - ); - } else { - gum::error!( - target: LOG_TARGET, - "Failed to validate and make available: {:?}", - e - ); + continue + }, + Ok((candidate_hash, membership)) => { + // This request gives membership in all fragment trees. We have some + // wasted data here, and it can be optimized if it proves + // relevant to performance. 
+ if let Some((_, depths)) = + membership.into_iter().find(|(leaf_hash, _)| leaf_hash == &leaf.hash) + { + for depth in depths { + seconded_at_depth.insert(depth, candidate_hash); + } + } + }, } } - }; - ctx.spawn("backing-validation", bg.boxed()) - .map_err(|_| Error::FailedToSpawnBackgroundTask)?; - } + state.per_leaf.insert( + leaf.hash, + ActiveLeafState { + prospective_parachains_mode: ProspectiveParachainsMode::Enabled, + seconded_at_depth, + }, + ); - Ok(()) -} + match fresh_relay_parents { + Some(f) => f.to_vec(), + None => { + gum::warn!( + target: LOG_TARGET, + leaf_hash = ?leaf.hash, + "Implicit view gave no relay-parents" + ); -/// Kick off validation work and distribute the result as a signed statement. -#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] -async fn kick_off_validation_work( - ctx: &mut Context, - rp_state: &mut PerRelayParentState, - background_validation_tx: &mpsc::Sender<(Hash, ValidatedCandidateCommand)>, - attesting: AttestingData, -) -> Result<(), Error> { - let candidate_hash = attesting.candidate.hash(); - if rp_state.issued_statements.contains(&candidate_hash) { - return Ok(()) - } + vec![leaf.hash] + }, + } + }, + Some((leaf, LeafHasProspectiveParachains::Enabled(Err(e)))) => { + gum::debug!( + target: LOG_TARGET, + leaf_hash = ?leaf.hash, + err = ?e, + "Failed to load implicit view for leaf." + ); - let descriptor = attesting.candidate.descriptor().clone(); + return Ok(()) + }, + }; - gum::debug!( - target: LOG_TARGET, - candidate_hash = ?candidate_hash, - candidate_receipt = ?attesting.candidate, - "Kicking off validation", - ); + // add entries in `per_relay_parent`. for all new relay-parents. 
+ for maybe_new in fresh_relay_parents { + if state.per_relay_parent.contains_key(&maybe_new) { + continue + } - let bg_sender = ctx.sender().clone(); - let pov = PoVData::FetchFromValidator { - from_validator: attesting.from_validator, - candidate_hash, - pov_hash: attesting.pov_hash, - }; + // construct a `PerRelayParent` from the runtime API + // and insert it. + let per = construct_per_relay_parent_state(ctx, maybe_new, &state.keystore).await?; - // TODO [now]: as we refactor validation to always take - // exhaustive parameters, this will need to change. - // - // Also, we will probably need to account for depth here, maybe. - background_validate_and_make_available( - ctx, - rp_state, - BackgroundValidationParams { - sender: bg_sender, - tx_command: background_validation_tx.clone(), - candidate: attesting.candidate, - relay_parent: rp_state.parent, - pov, - n_validators: rp_state.table_context.validators.len(), - span: None, - make_command: ValidatedCandidateCommand::Attest, - }, - ) - .await + if let Some(per) = per { + state.per_relay_parent.insert(maybe_new, per); + } + } + + Ok(()) } -/// Import the statement and kick off validation work if it is a part of our assignment. +/// Load the data necessary to do backing work on top of a relay-parent. #[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] -async fn maybe_validate_and_import( +async fn construct_per_relay_parent_state( ctx: &mut Context, - state: &mut State, relay_parent: Hash, - statement: SignedFullStatement, - metrics: &Metrics, -) -> Result<(), Error> { - let rp_state = match state.per_relay_parent.get_mut(&relay_parent) { - Some(r) => r, - None => { - gum::trace!( - target: LOG_TARGET, - ?relay_parent, - "Received statement for unknown relay-parent" - ); + keystore: &SyncCryptoStorePtr, +) -> Result, Error> { + macro_rules! 
try_runtime_api { + ($x: expr) => { + match $x { + Ok(x) => x, + Err(e) => { + gum::warn!( + target: LOG_TARGET, + err = ?e, + "Failed to fetch runtime API data for job", + ); - return Ok(()) - }, - }; + // We can't do candidate validation work if we don't have the + // requisite runtime API data. But these errors should not take + // down the node. + return Ok(None); + } + } + } + } - // TODO [now]: if we get an Error::RejectedByProspectiveParachains, - // we will do nothing. - if let Some(summary) = import_statement(ctx, rp_state, &statement).await? { - // import_statement already takes care of communicating with the - // prospective parachains subsystem. At this point, the candidate - // has already been accepted into the fragment trees. + let parent = relay_parent; - if Some(summary.group_id) != rp_state.assignment { - return Ok(()) - } - let attesting = match statement.payload() { - Statement::Seconded(receipt) => { - let candidate_hash = summary.candidate; + let (validators, groups, session_index, cores) = futures::try_join!( + request_validators(parent, ctx.sender()).await, + request_validator_groups(parent, ctx.sender()).await, + request_session_index_for_child(parent, ctx.sender()).await, + request_from_runtime(parent, ctx.sender(), |tx| { + RuntimeApiRequest::AvailabilityCores(tx) + },) + .await, + ) + .map_err(Error::JoinMultiple)?; - let attesting = AttestingData { - candidate: rp_state - .table - .get_candidate(&candidate_hash) - .ok_or(Error::CandidateNotFound)? 
- .to_plain(), - pov_hash: receipt.descriptor.pov_hash, - from_validator: statement.validator_index(), - backing: Vec::new(), - }; - rp_state.fallbacks.insert(summary.candidate, attesting.clone()); - attesting - }, - Statement::Valid(candidate_hash) => { - if let Some(attesting) = rp_state.fallbacks.get_mut(candidate_hash) { - let our_index = rp_state.table_context.validator.as_ref().map(|v| v.index()); - if our_index == Some(statement.validator_index()) { - return Ok(()) - } + let validators: Vec<_> = try_runtime_api!(validators); + let (validator_groups, group_rotation_info) = try_runtime_api!(groups); + let session_index = try_runtime_api!(session_index); + let cores = try_runtime_api!(cores); + + let signing_context = SigningContext { parent_hash: parent, session_index }; + let validator = + match Validator::construct(&validators, signing_context.clone(), keystore.clone()).await { + Ok(v) => Some(v), + Err(util::Error::NotAValidator) => None, + Err(e) => { + gum::warn!( + target: LOG_TARGET, + err = ?e, + "Cannot participate in candidate backing", + ); - if rp_state.awaiting_validation.contains(candidate_hash) { - // Job already running: - attesting.backing.push(statement.validator_index()); - return Ok(()) - } else { - // No job, so start another with current validator: - attesting.from_validator = statement.validator_index(); - attesting.clone() - } - } else { - return Ok(()) - } + return Ok(None) }, }; - kick_off_validation_work(ctx, rp_state, &state.background_validation_tx, attesting).await?; - } - Ok(()) -} - -#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] -async fn handle_statement_message( - ctx: &mut Context, - state: &mut State, - relay_parent: Hash, - statement: SignedFullStatement, - metrics: &Metrics, -) -> Result<(), Error> { - let _timer = metrics.time_process_statement(); + let mut groups = HashMap::new(); + let n_cores = cores.len(); + let mut assignment = None; - match maybe_validate_and_import(ctx, state, relay_parent, 
statement, metrics).await { - Err(Error::ValidationFailed(_)) => Ok(()), - Err(e) => Err(e), - Ok(()) => Ok(()), + for (idx, core) in cores.into_iter().enumerate() { + // Ignore prospective assignments on occupied cores for the time being. + if let CoreState::Scheduled(scheduled) = core { + let core_index = CoreIndex(idx as _); + let group_index = group_rotation_info.group_for_core(core_index, n_cores); + if let Some(g) = validator_groups.get(group_index.0 as usize) { + if validator.as_ref().map_or(false, |v| g.contains(&v.index())) { + assignment = Some((scheduled.para_id, scheduled.collator)); + } + groups.insert(scheduled.para_id, g.clone()); + } + } } -} -/// Kick off background validation with intent to second. -#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] -async fn validate_and_second( - ctx: &mut Context, - rp_state: &mut PerRelayParentState, - candidate: &CandidateReceipt, - pov: Arc, - background_validation_tx: &mpsc::Sender<(Hash, ValidatedCandidateCommand)>, -) -> Result<(), Error> { - let candidate_hash = candidate.hash(); - - gum::debug!( - target: LOG_TARGET, - candidate_hash = ?candidate_hash, - candidate_receipt = ?candidate, - "Validate and second candidate", - ); + let table_context = TableContext { groups, validators, validator }; - let bg_sender = ctx.sender().clone(); - background_validate_and_make_available( - ctx, - rp_state, - BackgroundValidationParams { - sender: bg_sender, - tx_command: background_validation_tx.clone(), - candidate: candidate.clone(), - relay_parent: rp_state.parent, - pov: PoVData::Ready(pov), - n_validators: rp_state.table_context.validators.len(), - span: None, - make_command: ValidatedCandidateCommand::Second, - }, - ) - .await?; + // TODO [now]: I've removed the `required_collator` more broadly, + // because it's not used in practice and was intended for parathreads. + // + // We should attempt parathreads another way, I think, so it makes sense + // to remove. 
+ let assignment = assignment.map(|(a, _required_collator)| a); - Ok(()) + Ok(Some(PerRelayParentState { + parent, + session_index, + assignment, + backed: HashSet::new(), + table: Table::default(), + table_context, + issued_statements: HashSet::new(), + awaiting_validation: HashSet::new(), + fallbacks: HashMap::new(), + })) } #[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] -async fn handle_second_msg( +async fn handle_validated_candidate_command( ctx: &mut Context, state: &mut State, - candidate: CandidateReceipt, - pov: PoV, + relay_parent: Hash, + command: ValidatedCandidateCommand, metrics: &Metrics, ) -> Result<(), Error> { - let _timer = metrics.time_process_second(); + match state.per_relay_parent.get_mut(&relay_parent) { + Some(rp_state) => { + let candidate_hash = command.candidate_hash(); + rp_state.awaiting_validation.remove(&candidate_hash); - let candidate_hash = candidate.hash(); - let relay_parent = candidate.descriptor().relay_parent; + match command { + ValidatedCandidateCommand::Second(res) => match res { + Ok((candidate, commitments, _)) => { + // sanity check. + // TODO [now]: this sanity check is almost certainly + // outdated - we now allow seconding multiple candidates + // per relay-parent. update it to properly defend against + // seconding stuff wrongly. + // + // The way we'll do this is by asking the prospective parachains + // subsystem about the hypothetical depth of the candidate at all + // active leaves and then ensuring we've not seconded anything with + // those depths at any of our active leaves. + if !rp_state.issued_statements.contains(&candidate_hash) { + let statement = Statement::Seconded(CommittedCandidateReceipt { + descriptor: candidate.descriptor.clone(), + commitments, + }); - let rp_state = match state.per_relay_parent.get_mut(&relay_parent) { - None => { - gum::trace!( - target: LOG_TARGET, - ?relay_parent, - ?candidate_hash, - "We were asked to second a candidate outside of our view." 
- ); + // TODO [now]: if we get an Error::RejectedByProspectiveParachains, + // then the statement has not been distributed. In this case, + // we should expunge the candidate from the rp_state, + if let Some(stmt) = sign_import_and_distribute_statement( + ctx, + rp_state, + statement, + state.keystore.clone(), + metrics, + ) + .await? + { + // TODO [now]: note the candidate as seconded in the + // per-candidate state. + rp_state.issued_statements.insert(candidate_hash); - return Ok(()) - } - Some(r) => r, - }; + metrics.on_candidate_seconded(); + ctx.send_message(CollatorProtocolMessage::Seconded( + rp_state.parent, + stmt, + )) + .await; + } + } + }, + Err(candidate) => { + ctx.send_message(CollatorProtocolMessage::Invalid( + rp_state.parent, + candidate, + )) + .await; + }, + }, + ValidatedCandidateCommand::Attest(res) => { + // We are done - avoid new validation spawns: + rp_state.fallbacks.remove(&candidate_hash); + // sanity check. + if !rp_state.issued_statements.contains(&candidate_hash) { + if res.is_ok() { + let statement = Statement::Valid(candidate_hash); - // Sanity check that candidate is from our assignment. 
- if Some(candidate.descriptor().para_id) != rp_state.assignment { - gum::debug!( - target: LOG_TARGET, - our_assignment = ?rp_state.assignment, - collation = ?candidate.descriptor().para_id, - "Subsystem asked to second for para outside of our assignment", - ); + sign_import_and_distribute_statement( + ctx, + rp_state, + statement, + state.keystore.clone(), + metrics, + ) + .await?; + } + rp_state.issued_statements.insert(candidate_hash); + } + }, + ValidatedCandidateCommand::AttestNoPoV(candidate_hash) => { + if let Some(attesting) = rp_state.fallbacks.get_mut(&candidate_hash) { + if let Some(index) = attesting.backing.pop() { + attesting.from_validator = index; + let attesting = attesting.clone(); - return Ok(()) + kick_off_validation_work( + ctx, + rp_state, + &state.background_validation_tx, + attesting, + ) + .await?; + } + } else { + gum::warn!( + target: LOG_TARGET, + "AttestNoPoV was triggered without fallback being available." + ); + debug_assert!(false); + } + }, + } + }, + None => { + // simple race condition; can be ignored = this relay-parent + // is no longer relevant. + }, } - // If the message is a `CandidateBackingMessage::Second`, sign and dispatch a - // Seconded statement only if we have not seconded any other candidate and - // have not signed a Valid statement for the requested candidate. - // - // TODO [now]: this check is outdated. we need to only second when we have seconded - // nothing else with the hypothetical depth of the candidate in all our active leaves. - - // if self.seconded.is_none() { - // // This job has not seconded a candidate yet. - - // if !self.issued_statements.contains(&candidate_hash) { - // let pov = Arc::new(pov); - // self.validate_and_second(&span, &root_span, ctx, &candidate, pov).await?; - // } - // } - Ok(()) } -struct JobAndSpan { - job: CandidateBackingJob, - span: PerLeafSpan, -} - -struct ViewEntry { - job: Option>, -} - -struct View { - // maps relay-parents to jobs and spans. 
- implicit_view: HashMap>, -} - -impl View { - fn new() -> Self { - View { implicit_view: HashMap::new() } - } - - fn job_mut<'a>(&'a mut self, relay_parent: &Hash) -> Option<&'a mut JobAndSpan> { - self.implicit_view.get_mut(relay_parent).and_then(|x| x.job.as_mut()) - } -} - -/// Holds all data needed for candidate backing job operation. -struct CandidateBackingJob { - /// The hash of the relay parent on top of which this job is doing it's work. - parent: Hash, - /// The session index this corresponds to. - session_index: SessionIndex, - /// The `ParaId` assigned to this validator - assignment: Option, - /// The collator required to author the candidate, if any. - required_collator: Option, - /// Spans for all candidates that are not yet backable. - unbacked_candidates: HashMap, - /// We issued `Seconded`, `Valid` or `Invalid` statements on about these candidates. - issued_statements: HashSet, - /// These candidates are undergoing validation in the background. - awaiting_validation: HashSet, - /// Data needed for retrying in case of `ValidatedCandidateCommand::AttestNoPoV`. - fallbacks: HashMap)>, - /// `Some(h)` if this job has already issued `Seconded` statement for some candidate with `h` hash. - seconded: Option, - /// The candidates that are includable, by hash. Each entry here indicates - /// that we've sent the provisioner the backed candidate. - backed: HashSet, +async fn sign_statement( + rp_state: &PerRelayParentState, + statement: Statement, keystore: SyncCryptoStorePtr, - table: Table, - table_context: TableContext, - background_validation_tx: mpsc::Sender<(Hash, ValidatedCandidateCommand)>, - metrics: Metrics, - _marker: std::marker::PhantomData, + metrics: &Metrics, +) -> Option { + let signed = rp_state + .table_context + .validator + .as_ref()? 
+ .sign(keystore, statement) + .await + .ok() + .flatten()?; + metrics.on_statement_signed(); + Some(signed) } -/// In case a backing validator does not provide a PoV, we need to retry with other backing -/// validators. +/// The dispute coordinator keeps track of all statements by validators about every recent +/// candidate. /// -/// This is the data needed to accomplish this. Basically all the data needed for spawning a -/// validation job and a list of backing validators, we can try. -#[derive(Clone)] -struct AttestingData { - /// The candidate to attest. - candidate: CandidateReceipt, - /// Hash of the PoV we need to fetch. - pov_hash: Hash, - /// Validator we are currently trying to get the PoV from. - from_validator: ValidatorIndex, - /// Other backing validators we can try in case `from_validator` failed. - backing: Vec, -} - -/// How many votes we need to consider a candidate backed. +/// When importing a statement, this should be called access the candidate receipt either +/// from the statement itself or from the underlying statement table in order to craft +/// and dispatch the notification to the dispute coordinator. /// -/// WARNING: This has to be kept in sync with the runtime check in the inclusion module. -fn minimum_votes(n_validators: usize) -> usize { - std::cmp::min(2, n_validators) -} - -#[derive(Default)] -struct TableContext { - validator: Option, - groups: HashMap>, - validators: Vec, -} - -impl TableContextTrait for TableContext { - type AuthorityId = ValidatorIndex; - type Digest = CandidateHash; - type GroupId = ParaId; - type Signature = ValidatorSignature; - type Candidate = CommittedCandidateReceipt; +/// This also does bounds-checking on the validator index and will return an error if the +/// validator index is out of bounds for the current validator set. 
It's expected that +/// this should never happen due to the interface of the candidate backing subsystem - +/// the networking component responsible for feeding statements to the backing subsystem +/// is meant to check the signature and provenance of all statements before submission. +#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] +async fn dispatch_new_statement_to_dispute_coordinator( + ctx: &mut Context, + rp_state: &PerRelayParentState, + candidate_hash: CandidateHash, + statement: &SignedFullStatement, +) -> Result<(), ValidatorIndexOutOfBounds> { + // Dispatch the statement to the dispute coordinator. + let validator_index = statement.validator_index(); + let signing_context = + SigningContext { parent_hash: rp_state.parent, session_index: rp_state.session_index }; - fn candidate_digest(candidate: &CommittedCandidateReceipt) -> CandidateHash { - candidate.hash() - } + let validator_public = match rp_state.table_context.validators.get(validator_index.0 as usize) { + None => return Err(ValidatorIndexOutOfBounds), + Some(v) => v, + }; - fn candidate_group(candidate: &CommittedCandidateReceipt) -> ParaId { - candidate.descriptor().para_id - } + let maybe_candidate_receipt = match statement.payload() { + Statement::Seconded(receipt) => Some(receipt.to_plain()), + Statement::Valid(candidate_hash) => { + // Valid statements are only supposed to be imported + // once we've seen at least one `Seconded` statement. 
+ rp_state.table.get_candidate(&candidate_hash).map(|c| c.to_plain()) + }, + }; - fn is_member_of(&self, authority: &ValidatorIndex, group: &ParaId) -> bool { - self.groups - .get(group) - .map_or(false, |g| g.iter().position(|a| a == authority).is_some()) - } + let maybe_signed_dispute_statement = SignedDisputeStatement::from_backing_statement( + statement.as_unchecked(), + signing_context, + validator_public.clone(), + ) + .ok(); - fn requisite_votes(&self, group: &ParaId) -> usize { - self.groups.get(group).map_or(usize::MAX, |g| minimum_votes(g.len())) + if let (Some(candidate_receipt), Some(dispute_statement)) = + (maybe_candidate_receipt, maybe_signed_dispute_statement) + { + ctx.send_message(DisputeCoordinatorMessage::ImportStatements { + candidate_hash, + candidate_receipt, + session: rp_state.session_index, + statements: vec![(dispute_statement, validator_index)], + pending_confirmation: None, + }) + .await; } -} -struct InvalidErasureRoot; - -// It looks like it's not possible to do an `impl From` given the current state of -// the code. So this does the necessary conversion. 
-fn primitive_statement_to_table(s: &SignedFullStatement) -> TableSignedStatement { - let statement = match s.payload() { - Statement::Seconded(c) => TableStatement::Seconded(c.clone()), - Statement::Valid(h) => TableStatement::Valid(h.clone()), - }; - - TableSignedStatement { - statement, - signature: s.signature().clone(), - sender: s.validator_index(), - } + Ok(()) } -fn table_attested_to_backed( - attested: TableAttestedCandidate< - ParaId, - CommittedCandidateReceipt, - ValidatorIndex, - ValidatorSignature, - >, - table_context: &TableContext, -) -> Option { - let TableAttestedCandidate { candidate, validity_votes, group_id: para_id } = attested; - - let (ids, validity_votes): (Vec<_>, Vec) = - validity_votes.into_iter().map(|(id, vote)| (id, vote.into())).unzip(); - - let group = table_context.groups.get(¶_id)?; - - let mut validator_indices = BitVec::with_capacity(group.len()); +/// Import a statement into the statement table and return the summary of the import. +#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] +async fn import_statement( + ctx: &mut Context, + rp_state: &mut PerRelayParentState, + statement: &SignedFullStatement, +) -> Result, Error> { + gum::debug!( + target: LOG_TARGET, + statement = ?statement.payload().to_compact(), + validator_index = statement.validator_index().0, + "Importing statement", + ); - validator_indices.resize(group.len(), false); + let candidate_hash = statement.payload().candidate_hash(); - // The order of the validity votes in the backed candidate must match - // the order of bits set in the bitfield, which is not necessarily - // the order of the `validity_votes` we got from the table. 
- let mut vote_positions = Vec::with_capacity(validity_votes.len()); - for (orig_idx, id) in ids.iter().enumerate() { - if let Some(position) = group.iter().position(|x| x == id) { - validator_indices.set(position, true); - vote_positions.push((orig_idx, position)); - } else { - gum::warn!( - target: LOG_TARGET, - "Logic error: Validity vote from table does not correspond to group", - ); + if let Err(ValidatorIndexOutOfBounds) = + dispatch_new_statement_to_dispute_coordinator(ctx, rp_state, candidate_hash, &statement) + .await + { + gum::warn!( + target: LOG_TARGET, + session_index = ?rp_state.session_index, + relay_parent = ?rp_state.parent, + validator_index = statement.validator_index().0, + "Supposedly 'Signed' statement has validator index out of bounds." + ); - return None - } + return Ok(None) } - vote_positions.sort_by_key(|(_orig, pos_in_group)| *pos_in_group); - - Some(BackedCandidate { - candidate, - validity_votes: vote_positions - .into_iter() - .map(|(pos_in_votes, _pos_in_group)| validity_votes[pos_in_votes].clone()) - .collect(), - validator_indices, - }) -} - -async fn store_available_data( - sender: &mut impl overseer::CandidateBackingSenderTrait, - n_validators: u32, - candidate_hash: CandidateHash, - available_data: AvailableData, -) -> Result<(), Error> { - let (tx, rx) = oneshot::channel(); - sender - .send_message(AvailabilityStoreMessage::StoreAvailableData { - candidate_hash, - n_validators, - available_data, - tx, - }) - .await; - - let _ = rx.await.map_err(Error::StoreAvailableData)?; - Ok(()) -} + let stmt = primitive_statement_to_table(statement); -// Make a `PoV` available. -// -// This will compute the erasure root internally and compare it to the expected erasure root. -// This returns `Err()` iff there is an internal error. Otherwise, it returns either `Ok(Ok(()))` or `Ok(Err(_))`. 
-async fn make_pov_available( - sender: &mut impl overseer::CandidateBackingSenderTrait, - n_validators: usize, - pov: Arc, - candidate_hash: CandidateHash, - validation_data: PersistedValidationData, - expected_erasure_root: Hash, - span: Option<&jaeger::Span>, -) -> Result, Error> { - let available_data = AvailableData { pov, validation_data }; + // TODO [now]: we violate the pre-existing checks that each validator may + // only second one candidate. + // + // We will need to address this so we don't get errors incorrectly. + let summary = rp_state.table.import_statement(&rp_state.table_context, stmt); + if let Some(attested) = summary + .as_ref() + .and_then(|s| rp_state.table.attested_candidate(&s.candidate, &rp_state.table_context)) { - let _span = span.as_ref().map(|s| s.child("erasure-coding").with_candidate(candidate_hash)); + // TODO [now] + // + // If this is a new candidate, we need to create an entry in the + // `PerCandidateState` map. + // + // If the relay parent supports prospective parachains, we also need + // to inform the prospective parachains subsystem of the seconded candidate + // If `ProspectiveParachainsMessage::Second` fails, then we expunge the + // statement from the table and return an error, which should be handled + // to avoid distribution of the statement. - let chunks = erasure_coding::obtain_chunks_v1(n_validators, &available_data)?; + let candidate_hash = attested.candidate.hash(); + // `HashSet::insert` returns true if the thing wasn't in there already. 
+ if rp_state.backed.insert(candidate_hash) { + if let Some(backed) = table_attested_to_backed(attested, &rp_state.table_context) { + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?candidate_hash, + relay_parent = ?rp_state.parent, + para_id = %backed.candidate.descriptor.para_id, + "Candidate backed", + ); - let branches = erasure_coding::branches(chunks.as_ref()); - let erasure_root = branches.root(); + // TODO [now]: inform the prospective parachains subsystem + // that the candidate is now backed. - if erasure_root != expected_erasure_root { - return Ok(Err(InvalidErasureRoot)) + // The provisioner waits on candidate-backing, which means + // that we need to send unbounded messages to avoid cycles. + // + // Backed candidates are bounded by the number of validators, + // parachains, and the block production rate of the relay chain. + let message = ProvisionerMessage::ProvisionableData( + rp_state.parent, + ProvisionableData::BackedCandidate(backed.receipt()), + ); + ctx.send_unbounded_message(message); + } } } - { - let _span = span.as_ref().map(|s| s.child("store-data").with_candidate(candidate_hash)); - - store_available_data(sender, n_validators as u32, candidate_hash, available_data).await?; - } + issue_new_misbehaviors(ctx, rp_state.parent, &mut rp_state.table); - Ok(Ok(())) + Ok(summary) } -async fn request_pov( - sender: &mut impl overseer::CandidateBackingSenderTrait, +/// Check if there have happened any new misbehaviors and issue necessary messages. 
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] +fn issue_new_misbehaviors( + ctx: &mut Context, relay_parent: Hash, - from_validator: ValidatorIndex, - candidate_hash: CandidateHash, - pov_hash: Hash, -) -> Result, Error> { - let (tx, rx) = oneshot::channel(); - sender - .send_message(AvailabilityDistributionMessage::FetchPoV { + table: &mut Table, +) { + // collect the misbehaviors to avoid double mutable self borrow issues + let misbehaviors: Vec<_> = table.drain_misbehaviors().collect(); + for (validator_id, report) in misbehaviors { + // The provisioner waits on candidate-backing, which means + // that we need to send unbounded messages to avoid cycles. + // + // Misbehaviors are bounded by the number of validators and + // the block production protocol. + ctx.send_unbounded_message(ProvisionerMessage::ProvisionableData( relay_parent, - from_validator, - candidate_hash, - pov_hash, - tx, - }) - .await; - - let pov = rx.await.map_err(|_| Error::FetchPoV)?; - Ok(Arc::new(pov)) + ProvisionableData::MisbehaviorReport(relay_parent, validator_id, report), + )); + } } -async fn request_candidate_validation( - sender: &mut impl overseer::CandidateBackingSenderTrait, - candidate_receipt: CandidateReceipt, - pov: Arc, -) -> Result { - let (tx, rx) = oneshot::channel(); +#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] +async fn sign_import_and_distribute_statement( + ctx: &mut Context, + rp_state: &mut PerRelayParentState, + statement: Statement, + keystore: SyncCryptoStorePtr, + metrics: &Metrics, +) -> Result, Error> { + if let Some(signed_statement) = sign_statement(&*rp_state, statement, keystore, metrics).await { + import_statement(ctx, rp_state, &signed_statement).await?; - // TODO [now]: always do exhaustive validation. 
- sender - .send_message(CandidateValidationMessage::ValidateFromChainState( - candidate_receipt, - pov, - BACKING_EXECUTION_TIMEOUT, - tx, - )) - .await; + // TODO [now]: if we get an Error::RejectedByProspectiveParachains, + // we _do not_ distribute - it has been expunged. + // Propagate the error onwards. + let smsg = StatementDistributionMessage::Share(rp_state.parent, signed_statement.clone()); + ctx.send_unbounded_message(smsg); - match rx.await { - Ok(Ok(validation_result)) => Ok(validation_result), - Ok(Err(err)) => Err(Error::ValidationFailed(err)), - Err(err) => Err(Error::ValidateFromChainState(err)), + Ok(Some(signed_statement)) + } else { + Ok(None) } } -type BackgroundValidationResult = - Result<(CandidateReceipt, CandidateCommitments, Arc), CandidateReceipt>; - -struct BackgroundValidationParams { - sender: S, - tx_command: mpsc::Sender<(Hash, ValidatedCandidateCommand)>, - candidate: CandidateReceipt, - relay_parent: Hash, - pov: PoVData, - n_validators: usize, - span: Option, - make_command: F, -} - -async fn validate_and_make_available( +#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] +async fn background_validate_and_make_available( + ctx: &mut Context, + rp_state: &mut PerRelayParentState, params: BackgroundValidationParams< impl overseer::CandidateBackingSenderTrait, - impl Fn(BackgroundValidationResult) -> ValidatedCandidateCommand + Sync, + impl Fn(BackgroundValidationResult) -> ValidatedCandidateCommand + Send + 'static + Sync, >, ) -> Result<(), Error> { - let BackgroundValidationParams { - mut sender, - mut tx_command, - candidate, - relay_parent, - pov, - n_validators, - span, - make_command, - } = params; - - let pov = match pov { - PoVData::Ready(pov) => pov, - PoVData::FetchFromValidator { from_validator, candidate_hash, pov_hash } => { - let _span = span.as_ref().map(|s| s.child("request-pov")); - match request_pov(&mut sender, relay_parent, from_validator, candidate_hash, pov_hash) - .await - { - 
Err(Error::FetchPoV) => { - tx_command - .send(( - relay_parent, - ValidatedCandidateCommand::AttestNoPoV(candidate.hash()), - )) - .await - .map_err(Error::BackgroundValidationMpsc)?; - return Ok(()) - }, - Err(err) => return Err(err), - Ok(pov) => pov, + let candidate_hash = params.candidate.hash(); + if rp_state.awaiting_validation.insert(candidate_hash) { + // spawn background task. + let bg = async move { + if let Err(e) = validate_and_make_available(params).await { + if let Error::BackgroundValidationMpsc(error) = e { + gum::debug!( + target: LOG_TARGET, + ?error, + "Mpsc background validation mpsc died during validation- leaf no longer active?" + ); + } else { + gum::error!( + target: LOG_TARGET, + "Failed to validate and make available: {:?}", + e + ); + } } - }, - }; + }; - let v = { - let _span = span.as_ref().map(|s| { - s.child("request-validation") - .with_pov(&pov) - .with_para_id(candidate.descriptor().para_id) - }); - request_candidate_validation(&mut sender, candidate.clone(), pov.clone()).await? + ctx.spawn("backing-validation", bg.boxed()) + .map_err(|_| Error::FailedToSpawnBackgroundTask)?; + } + + Ok(()) +} + +/// Kick off validation work and distribute the result as a signed statement. 
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] +async fn kick_off_validation_work( + ctx: &mut Context, + rp_state: &mut PerRelayParentState, + background_validation_tx: &mpsc::Sender<(Hash, ValidatedCandidateCommand)>, + attesting: AttestingData, +) -> Result<(), Error> { + let candidate_hash = attesting.candidate.hash(); + if rp_state.issued_statements.contains(&candidate_hash) { + return Ok(()) + } + + let descriptor = attesting.candidate.descriptor().clone(); + + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?candidate_hash, + candidate_receipt = ?attesting.candidate, + "Kicking off validation", + ); + + let bg_sender = ctx.sender().clone(); + let pov = PoVData::FetchFromValidator { + from_validator: attesting.from_validator, + candidate_hash, + pov_hash: attesting.pov_hash, }; - let res = match v { - ValidationResult::Valid(commitments, validation_data) => { - gum::debug!( + // TODO [now]: as we refactor validation to always take + // exhaustive parameters, this will need to change. + // + // Also, we will probably need to account for depth here, maybe. + background_validate_and_make_available( + ctx, + rp_state, + BackgroundValidationParams { + sender: bg_sender, + tx_command: background_validation_tx.clone(), + candidate: attesting.candidate, + relay_parent: rp_state.parent, + pov, + n_validators: rp_state.table_context.validators.len(), + span: None, + make_command: ValidatedCandidateCommand::Attest, + }, + ) + .await +} + +/// Import the statement and kick off validation work if it is a part of our assignment. 
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] +async fn maybe_validate_and_import( + ctx: &mut Context, + state: &mut State, + relay_parent: Hash, + statement: SignedFullStatement, + metrics: &Metrics, +) -> Result<(), Error> { + let rp_state = match state.per_relay_parent.get_mut(&relay_parent) { + Some(r) => r, + None => { + gum::trace!( target: LOG_TARGET, - candidate_hash = ?candidate.hash(), - "Validation successful", + ?relay_parent, + "Received statement for unknown relay-parent" ); - let erasure_valid = make_pov_available( - &mut sender, - n_validators, - pov.clone(), - candidate.hash(), - validation_data, - candidate.descriptor.erasure_root, - span.as_ref(), - ) - .await?; - - match erasure_valid { - Ok(()) => Ok((candidate, commitments, pov.clone())), - Err(InvalidErasureRoot) => { - gum::debug!( - target: LOG_TARGET, - candidate_hash = ?candidate.hash(), - actual_commitments = ?commitments, - "Erasure root doesn't match the announced by the candidate receipt", - ); - Err(candidate) - }, - } - }, - ValidationResult::Invalid(InvalidCandidate::CommitmentsHashMismatch) => { - // If validation produces a new set of commitments, we vote the candidate as invalid. - gum::warn!( - target: LOG_TARGET, - candidate_hash = ?candidate.hash(), - "Validation yielded different commitments", - ); - Err(candidate) - }, - ValidationResult::Invalid(reason) => { - gum::debug!( - target: LOG_TARGET, - candidate_hash = ?candidate.hash(), - reason = ?reason, - "Validation yielded an invalid candidate", - ); - Err(candidate) + return Ok(()) }, }; - tx_command.send((relay_parent, make_command(res))).await.map_err(Into::into) -} + // TODO [now]: if we get an Error::RejectedByProspectiveParachains, + // we will do nothing. + if let Some(summary) = import_statement(ctx, rp_state, &statement).await? { + // import_statement already takes care of communicating with the + // prospective parachains subsystem. 
At this point, the candidate + // has already been accepted into the fragment trees. -struct ValidatorIndexOutOfBounds; + if Some(summary.group_id) != rp_state.assignment { + return Ok(()) + } + let attesting = match statement.payload() { + Statement::Seconded(receipt) => { + let candidate_hash = summary.candidate; -#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] -impl CandidateBackingJob { - async fn background_validate_and_make_available( - &mut self, - ctx: &mut Context, - params: BackgroundValidationParams< - impl overseer::CandidateBackingSenderTrait, - impl Fn(BackgroundValidationResult) -> ValidatedCandidateCommand + Send + 'static + Sync, - >, - ) -> Result<(), Error> { - let candidate_hash = params.candidate.hash(); - if self.awaiting_validation.insert(candidate_hash) { - // spawn background task. - let bg = async move { - if let Err(e) = validate_and_make_available(params).await { - if let Error::BackgroundValidationMpsc(error) = e { - gum::debug!( - target: LOG_TARGET, - ?error, - "Mpsc background validation mpsc died during validation- leaf no longer active?" - ); + let attesting = AttestingData { + candidate: rp_state + .table + .get_candidate(&candidate_hash) + .ok_or(Error::CandidateNotFound)? 
+ .to_plain(), + pov_hash: receipt.descriptor.pov_hash, + from_validator: statement.validator_index(), + backing: Vec::new(), + }; + rp_state.fallbacks.insert(summary.candidate, attesting.clone()); + attesting + }, + Statement::Valid(candidate_hash) => { + if let Some(attesting) = rp_state.fallbacks.get_mut(candidate_hash) { + let our_index = rp_state.table_context.validator.as_ref().map(|v| v.index()); + if our_index == Some(statement.validator_index()) { + return Ok(()) + } + + if rp_state.awaiting_validation.contains(candidate_hash) { + // Job already running: + attesting.backing.push(statement.validator_index()); + return Ok(()) } else { - gum::error!( - target: LOG_TARGET, - "Failed to validate and make available: {:?}", - e - ); + // No job, so start another with current validator: + attesting.from_validator = statement.validator_index(); + attesting.clone() } + } else { + return Ok(()) } - }; - - ctx.spawn("backing-validation", bg.boxed()) - .map_err(|_| Error::FailedToSpawnBackgroundTask)?; - } + }, + }; - Ok(()) + kick_off_validation_work(ctx, rp_state, &state.background_validation_tx, attesting).await?; } + Ok(()) +} - /// Kick off background validation with intent to second. - async fn validate_and_second( - &mut self, - parent_span: &jaeger::Span, - root_span: &jaeger::Span, - ctx: &mut Context, - candidate: &CandidateReceipt, - pov: Arc, - ) -> Result<(), Error> { - // Check that candidate is collated by the right collator. - if self - .required_collator - .as_ref() - .map_or(false, |c| c != &candidate.descriptor().collator) - { - ctx.send_message(CollatorProtocolMessage::Invalid(self.parent, candidate.clone())) - .await; - return Ok(()) - } +/// Kick off background validation with intent to second. 
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] +async fn validate_and_second( + ctx: &mut Context, + rp_state: &mut PerRelayParentState, + candidate: &CandidateReceipt, + pov: Arc, + background_validation_tx: &mpsc::Sender<(Hash, ValidatedCandidateCommand)>, +) -> Result<(), Error> { + let candidate_hash = candidate.hash(); - let candidate_hash = candidate.hash(); - let mut span = self.get_unbacked_validation_child( - root_span, - candidate_hash, - candidate.descriptor().para_id, - ); + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?candidate_hash, + candidate_receipt = ?candidate, + "Validate and second candidate", + ); - span.as_mut().map(|span| span.add_follows_from(parent_span)); + let bg_sender = ctx.sender().clone(); + background_validate_and_make_available( + ctx, + rp_state, + BackgroundValidationParams { + sender: bg_sender, + tx_command: background_validation_tx.clone(), + candidate: candidate.clone(), + relay_parent: rp_state.parent, + pov: PoVData::Ready(pov), + n_validators: rp_state.table_context.validators.len(), + span: None, + make_command: ValidatedCandidateCommand::Second, + }, + ) + .await?; - gum::debug!( - target: LOG_TARGET, - candidate_hash = ?candidate_hash, - candidate_receipt = ?candidate, - "Validate and second candidate", - ); + Ok(()) +} - let bg_sender = ctx.sender().clone(); - self.background_validate_and_make_available( - ctx, - BackgroundValidationParams { - sender: bg_sender, - tx_command: self.background_validation_tx.clone(), - candidate: candidate.clone(), - relay_parent: self.parent, - pov: PoVData::Ready(pov), - n_validators: self.table_context.validators.len(), - span, - make_command: ValidatedCandidateCommand::Second, - }, - ) - .await?; +#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] +async fn handle_second_message( + ctx: &mut Context, + state: &mut State, + candidate: CandidateReceipt, + pov: PoV, + metrics: &Metrics, +) -> Result<(), Error> { + let _timer = 
metrics.time_process_second(); - Ok(()) - } + let candidate_hash = candidate.hash(); + let relay_parent = candidate.descriptor().relay_parent; - async fn handle_second_msg( - &mut self, - root_span: &jaeger::Span, - ctx: &mut Context, - candidate: CandidateReceipt, - pov: PoV, - ) -> Result<(), Error> { - let _timer = self.metrics.time_process_second(); - - let candidate_hash = candidate.hash(); - let span = root_span - .child("second") - .with_stage(jaeger::Stage::CandidateBacking) - .with_pov(&pov) - .with_candidate(candidate_hash) - .with_relay_parent(self.parent); - - // Sanity check that candidate is from our assignment. - if Some(candidate.descriptor().para_id) != self.assignment { - gum::debug!( + let rp_state = match state.per_relay_parent.get_mut(&relay_parent) { + None => { + gum::trace!( target: LOG_TARGET, - our_assignment = ?self.assignment, - collation = ?candidate.descriptor().para_id, - "Subsystem asked to second for para outside of our assignment", + ?relay_parent, + ?candidate_hash, + "We were asked to second a candidate outside of our view." ); return Ok(()) } + Some(r) => r, + }; - // If the message is a `CandidateBackingMessage::Second`, sign and dispatch a - // Seconded statement only if we have not seconded any other candidate and - // have not signed a Valid statement for the requested candidate. - if self.seconded.is_none() { - // This job has not seconded a candidate yet. - - if !self.issued_statements.contains(&candidate_hash) { - let pov = Arc::new(pov); - self.validate_and_second(&span, &root_span, ctx, &candidate, pov).await?; - } - } - - Ok(()) - } - - async fn handle_statement_message( - &mut self, - root_span: &jaeger::Span, - ctx: &mut Context, - statement: SignedFullStatement, - ) -> Result<(), Error> { - // function pending removal. 
- unimplemented!() - } - - fn handle_get_backed_candidates_message( - &mut self, - requested_candidates: Vec, - tx: oneshot::Sender>, - ) -> Result<(), Error> { - let _timer = self.metrics.time_get_backed_candidates(); + // Sanity check that candidate is from our assignment. + if Some(candidate.descriptor().para_id) != rp_state.assignment { + gum::debug!( + target: LOG_TARGET, + our_assignment = ?rp_state.assignment, + collation = ?candidate.descriptor().para_id, + "Subsystem asked to second for para outside of our assignment", + ); - let backed = requested_candidates - .into_iter() - .filter_map(|hash| { - self.table - .attested_candidate(&hash, &self.table_context) - .and_then(|attested| table_attested_to_backed(attested, &self.table_context)) - }) - .collect(); - - tx.send(backed).map_err(|data| Error::Send(data))?; - Ok(()) + return Ok(()) } - /// Kick off validation work and distribute the result as a signed statement. - async fn kick_off_validation_work( - &mut self, - ctx: &mut Context, - attesting: AttestingData, - span: Option, - ) -> Result<(), Error> { - let candidate_hash = attesting.candidate.hash(); - if self.issued_statements.contains(&candidate_hash) { - return Ok(()) - } - - let descriptor = attesting.candidate.descriptor().clone(); + // If the message is a `CandidateBackingMessage::Second`, sign and dispatch a + // Seconded statement only if we have not seconded any other candidate and + // have not signed a Valid statement for the requested candidate. + // + // TODO [now]: this check is outdated. we need to only second when we have seconded + // nothing else with the hypothetical depth of the candidate in all our active leaves. - gum::debug!( - target: LOG_TARGET, - candidate_hash = ?candidate_hash, - candidate_receipt = ?attesting.candidate, - "Kicking off validation", - ); + // if self.seconded.is_none() { + // // This job has not seconded a candidate yet. - // Check that candidate is collated by the right collator. 
- if self.required_collator.as_ref().map_or(false, |c| c != &descriptor.collator) { - // If not, we've got the statement in the table but we will - // not issue validation work for it. - // - // Act as though we've issued a statement. - self.issued_statements.insert(candidate_hash); - return Ok(()) - } + // if !self.issued_statements.contains(&candidate_hash) { + // let pov = Arc::new(pov); + // self.validate_and_second(&span, &root_span, ctx, &candidate, pov).await?; + // } + // } - let bg_sender = ctx.sender().clone(); - let pov = PoVData::FetchFromValidator { - from_validator: attesting.from_validator, - candidate_hash, - pov_hash: attesting.pov_hash, - }; - self.background_validate_and_make_available( - ctx, - BackgroundValidationParams { - sender: bg_sender, - tx_command: self.background_validation_tx.clone(), - candidate: attesting.candidate, - relay_parent: self.parent, - pov, - n_validators: self.table_context.validators.len(), - span, - make_command: ValidatedCandidateCommand::Attest, - }, - ) - .await - } + Ok(()) +} - /// Insert or get the unbacked-span for the given candidate hash. - fn insert_or_get_unbacked_span( - &mut self, - parent_span: &jaeger::Span, - hash: CandidateHash, - para_id: Option, - ) -> Option<&jaeger::Span> { - if !self.backed.contains(&hash) { - // only add if we don't consider this backed. 
- let span = self.unbacked_candidates.entry(hash).or_insert_with(|| { - let s = parent_span.child("unbacked-candidate").with_candidate(hash); - if let Some(para_id) = para_id { - s.with_para_id(para_id) - } else { - s - } - }); - Some(span) - } else { - None - } - } +#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] +async fn handle_statement_message( + ctx: &mut Context, + state: &mut State, + relay_parent: Hash, + statement: SignedFullStatement, + metrics: &Metrics, +) -> Result<(), Error> { + let _timer = metrics.time_process_statement(); - fn get_unbacked_validation_child( - &mut self, - parent_span: &jaeger::Span, - hash: CandidateHash, - para_id: ParaId, - ) -> Option { - self.insert_or_get_unbacked_span(parent_span, hash, Some(para_id)).map(|span| { - span.child("validation") - .with_candidate(hash) - .with_stage(Stage::CandidateBacking) - }) + match maybe_validate_and_import(ctx, state, relay_parent, statement, metrics).await { + Err(Error::ValidationFailed(_)) => Ok(()), + Err(e) => Err(e), + Ok(()) => Ok(()), } +} - fn get_unbacked_statement_child( - &mut self, - parent_span: &jaeger::Span, - hash: CandidateHash, - validator: ValidatorIndex, - ) -> Option { - self.insert_or_get_unbacked_span(parent_span, hash, None).map(|span| { - span.child("import-statement") - .with_candidate(hash) - .with_validator_index(validator) +fn handle_get_backed_candidates_message( + rp_state: &PerRelayParentState, + requested_candidates: Vec, + tx: oneshot::Sender>, + metrics: &Metrics, +) -> Result<(), Error> { + let _timer = metrics.time_get_backed_candidates(); + + let backed = requested_candidates + .into_iter() + .filter_map(|hash| { + rp_state.table + .attested_candidate(&hash, &rp_state.table_context) + .and_then(|attested| table_attested_to_backed(attested, &rp_state.table_context)) }) - } + .collect(); - fn remove_unbacked_span(&mut self, hash: &CandidateHash) -> Option { - self.unbacked_candidates.remove(hash) - } + tx.send(backed).map_err(|data| 
Error::Send(data))?; + Ok(()) } From 22a9d5e854b1574475a66320801e66394585a863 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 27 May 2022 18:53:01 -0500 Subject: [PATCH 20/54] add prospective parachains mode to relay parent entries --- node/core/backing/src/lib.rs | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index dac815015392..fac83d92afb7 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -200,7 +200,7 @@ where } struct PerRelayParentState { - // TODO [now]: add a `ProspectiveParachainsMode` to the leaf. + mode: ProspectiveParachainsMode, /// The hash of the relay parent on top of which this job is doing it's work. parent: Hash, /// The session index this corresponds to. @@ -259,7 +259,7 @@ struct ActiveLeafState { /// The state of the subsystem. struct State { - /// The utility for managing the implicit and explicit views ina consistent way. + /// The utility for managing the implicit and explicit views in a consistent way. /// /// We only feed leaves which have prospective parachains enabled to this view. implicit_view: ImplicitView, @@ -939,9 +939,21 @@ async fn handle_active_leaves_update( continue } + let mode = match state.per_leaf.get(&maybe_new) { + None => { + // If the relay-parent isn't a leaf itself, + // then it is guaranteed by the prospective parachains + // subsystem that it is an ancestor of a leaf which + // has prospective parachains enabled and that the + // block itself did. + ProspectiveParachainsMode::Enabled + } + Some(l) => l.prospective_parachains_mode, + }; + // construct a `PerRelayParent` from the runtime API // and insert it. 
- let per = construct_per_relay_parent_state(ctx, maybe_new, &state.keystore).await?; + let per = construct_per_relay_parent_state(ctx, maybe_new, &state.keystore, mode).await?; if let Some(per) = per { state.per_relay_parent.insert(maybe_new, per); @@ -957,6 +969,7 @@ async fn construct_per_relay_parent_state( ctx: &mut Context, relay_parent: Hash, keystore: &SyncCryptoStorePtr, + mode: ProspectiveParachainsMode, ) -> Result, Error> { macro_rules! try_runtime_api { ($x: expr) => { @@ -1040,6 +1053,7 @@ async fn construct_per_relay_parent_state( let assignment = assignment.map(|(a, _required_collator)| a); Ok(Some(PerRelayParentState { + mode, parent, session_index, assignment, From fc0c4e47d17be1a73bbd0b8d7386e0587d3a9ad7 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 27 May 2022 18:53:06 -0500 Subject: [PATCH 21/54] fmt --- node/core/backing/src/lib.rs | 26 +++++++------------------- 1 file changed, 7 insertions(+), 19 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index fac83d92afb7..bb711c54a9c4 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -90,8 +90,7 @@ use polkadot_node_subsystem::{ ProspectiveParachainsMessage, ProvisionableData, ProvisionerMessage, RuntimeApiRequest, StatementDistributionMessage, }, - overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, - SubsystemError, + overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemError, }; use polkadot_node_subsystem_util::{ self as util, @@ -728,22 +727,10 @@ async fn handle_communication( ) -> Result<(), Error> { match message { CandidateBackingMessage::Second(_relay_parent, candidate, pov) => { - handle_second_message( - ctx, - state, - candidate, - pov, - metrics, - ).await?; + handle_second_message(ctx, state, candidate, pov, metrics).await?; }, CandidateBackingMessage::Statement(relay_parent, statement) => { - handle_statement_message( - ctx, - state, - 
relay_parent, - statement, - metrics, - ).await?; + handle_statement_message(ctx, state, relay_parent, statement, metrics).await?; }, CandidateBackingMessage::GetBackedCandidates(relay_parent, requested_candidates, tx) => if let Some(rp_state) = state.per_relay_parent.get(&relay_parent) { @@ -947,7 +934,7 @@ async fn handle_active_leaves_update( // has prospective parachains enabled and that the // block itself did. ProspectiveParachainsMode::Enabled - } + }, Some(l) => l.prospective_parachains_mode, }; @@ -1620,7 +1607,7 @@ async fn handle_second_message( ); return Ok(()) - } + }, Some(r) => r, }; @@ -1683,7 +1670,8 @@ fn handle_get_backed_candidates_message( let backed = requested_candidates .into_iter() .filter_map(|hash| { - rp_state.table + rp_state + .table .attested_candidate(&hash, &rp_state.table_context) .and_then(|attested| table_attested_to_backed(attested, &rp_state.table_context)) }) From a4df2770a0ace8c2b70474cab42da5773ffadc22 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 27 May 2022 18:54:23 -0500 Subject: [PATCH 22/54] add a RejectedByProspectiveParachains error --- node/core/backing/src/error.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/node/core/backing/src/error.rs b/node/core/backing/src/error.rs index 604c6c0a0c37..c5efcea01b77 100644 --- a/node/core/backing/src/error.rs +++ b/node/core/backing/src/error.rs @@ -42,6 +42,9 @@ pub enum Error { #[error("FetchPoV failed")] FetchPoV, + #[error("Candidate rejected by prospective parachains subsystem")] + RejectedByProspectiveParachains, + #[fatal] #[error("Failed to spawn background task")] FailedToSpawnBackgroundTask, From 2f202d01c9dc490f8883c403682dcaab6ec19f6f Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 27 May 2022 20:24:09 -0500 Subject: [PATCH 23/54] notify prospective parachains of seconded and backed candidates --- node/core/backing/src/lib.rs | 137 +++++++++++++++++++++++++++++------ 1 file changed, 114 insertions(+), 23 deletions(-) diff --git 
a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index bb711c54a9c4..eb47deca9a64 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -199,7 +199,7 @@ where } struct PerRelayParentState { - mode: ProspectiveParachainsMode, + prospective_parachains_mode: ProspectiveParachainsMode, /// The hash of the relay parent on top of which this job is doing it's work. parent: Hash, /// The session index this corresponds to. @@ -1040,7 +1040,7 @@ async fn construct_per_relay_parent_state( let assignment = assignment.map(|(a, _required_collator)| a); Ok(Some(PerRelayParentState { - mode, + prospective_parachains_mode: mode, parent, session_index, assignment, @@ -1086,19 +1086,32 @@ async fn handle_validated_candidate_command( }); // TODO [now]: if we get an Error::RejectedByProspectiveParachains, - // then the statement has not been distributed. In this case, - // we should expunge the candidate from the rp_state, + // then the statement has not been distributed. We need to handle this case + + // TODO [now]: get this from changing `BackgroundValidationResult`. + let persisted_validation_data = unimplemented!(); if let Some(stmt) = sign_import_and_distribute_statement( ctx, rp_state, + &mut state.per_candidate, statement, + persisted_validation_data, state.keystore.clone(), metrics, ) .await? { - // TODO [now]: note the candidate as seconded in the - // per-candidate state. + match state.per_candidate.get_mut(&candidate_hash) { + None => { + gum::warn!( + target: LOG_TARGET, + ?candidate_hash, + "Missing `per_candidate` for seconded candidate.", + ); + } + Some(p) => { p.seconded_locally = true } + } + // TODO [now]: update seconded depths in active leaves. rp_state.issued_statements.insert(candidate_hash); metrics.on_candidate_seconded(); @@ -1129,7 +1142,9 @@ async fn handle_validated_candidate_command( sign_import_and_distribute_statement( ctx, rp_state, + &mut state.per_candidate, statement, + None, // only needed when seconding. 
state.keystore.clone(), metrics, ) @@ -1251,11 +1266,21 @@ async fn dispatch_new_statement_to_dispute_coordinator( } /// Import a statement into the statement table and return the summary of the import. +/// +/// This will fail with `Error::RejectedByProspectiveParachains` if the message type +/// is seconded, the candidate is fresh, +/// and any of the following are true: +/// 1. There is no `PersistedValidationData` attached. +/// 2. Prospective parachains are enabled for the relay parent and the prospective parachains +/// subsystem returned an empty `FragmentTreeMembership` +/// i.e. did not recognize the candidate as being applicable to any of the active leaves. #[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] async fn import_statement( ctx: &mut Context, rp_state: &mut PerRelayParentState, + per_candidate: &mut HashMap, statement: &SignedFullStatement, + persisted_validation_data: Option, ) -> Result, Error> { gum::debug!( target: LOG_TARGET, @@ -1266,6 +1291,65 @@ async fn import_statement( let candidate_hash = statement.payload().candidate_hash(); + // If this is a new candidate (statement is 'seconded' and candidate is unknown), + // we need to create an entry in the `PerCandidateState` map. + // + // If the relay parent supports prospective parachains, we also need + // to inform the prospective parachains subsystem of the seconded candidate + // If `ProspectiveParachainsMessage::Second` fails, then we return + // Error::RejectedByProspectiveParachains. + // + // Persisted Validation Data should be available - it may already be available + // if this is a candidate we are seconding. + // + // We should also not accept any candidates which have no valid depths under any of + // our active leaves. 
+ if let Statement::Seconded(candidate) = statement.payload() { + if !per_candidate.contains_key(&candidate_hash) { + let pvd = match persisted_validation_data { + None => return Err(Error::RejectedByProspectiveParachains), + Some(pvd) => pvd, + }; + + per_candidate.insert( + candidate_hash, + PerCandidateState { + persisted_validation_data: pvd.clone(), + // This is set after importing when seconding locally. + seconded_locally: false, + para_id: candidate.descriptor().para_id, + relay_parent: candidate.descriptor().relay_parent, + }, + ); + + if rp_state.prospective_parachains_mode.is_enabled() { + let (tx, rx) = oneshot::channel(); + ctx.send_message(ProspectiveParachainsMessage::CandidateSeconded( + candidate.descriptor().para_id, + candidate.clone(), + pvd, + tx, + )).await; + + match rx.await { + Err(oneshot::Canceled) => { + gum::warn!( + target: LOG_TARGET, + "Could not reach the Prospective Parachains subsystem." + ); + + return Err(Error::RejectedByProspectiveParachains); + } + Ok(membership) => { + if membership.is_empty() { + return Err(Error::RejectedByProspectiveParachains); + } + } + } + } + } + } + if let Err(ValidatorIndexOutOfBounds) = dispatch_new_statement_to_dispute_coordinator(ctx, rp_state, candidate_hash, &statement) .await @@ -1293,31 +1377,26 @@ async fn import_statement( .as_ref() .and_then(|s| rp_state.table.attested_candidate(&s.candidate, &rp_state.table_context)) { - // TODO [now] - // - // If this is a new candidate, we need to create an entry in the - // `PerCandidateState` map. - // - // If the relay parent supports prospective parachains, we also need - // to inform the prospective parachains subsystem of the seconded candidate - // If `ProspectiveParachainsMessage::Second` fails, then we expunge the - // statement from the table and return an error, which should be handled - // to avoid distribution of the statement. 
- - let candidate_hash = attested.candidate.hash(); // `HashSet::insert` returns true if the thing wasn't in there already. if rp_state.backed.insert(candidate_hash) { if let Some(backed) = table_attested_to_backed(attested, &rp_state.table_context) { + let para_id = backed.candidate.descriptor.para_id; gum::debug!( target: LOG_TARGET, candidate_hash = ?candidate_hash, relay_parent = ?rp_state.parent, - para_id = %backed.candidate.descriptor.para_id, + %para_id, "Candidate backed", ); - // TODO [now]: inform the prospective parachains subsystem + // Inform the prospective parachains subsystem // that the candidate is now backed. + if rp_state.prospective_parachains_mode.is_enabled() { + ctx.send_message(ProspectiveParachainsMessage::CandidateBacked( + para_id, + candidate_hash, + )).await; + } // The provisioner waits on candidate-backing, which means // that we need to send unbounded messages to avoid cycles. @@ -1360,16 +1439,22 @@ fn issue_new_misbehaviors( } } +/// Sign, import, and distribute a statement. +/// +/// If the statement is a `Seconded` statement, the `persisted_validation_data` +/// must be `Some`. #[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] async fn sign_import_and_distribute_statement( ctx: &mut Context, rp_state: &mut PerRelayParentState, + per_candidate: &mut HashMap, statement: Statement, + persisted_validation_data: Option, keystore: SyncCryptoStorePtr, metrics: &Metrics, ) -> Result, Error> { if let Some(signed_statement) = sign_statement(&*rp_state, statement, keystore, metrics).await { - import_statement(ctx, rp_state, &signed_statement).await?; + import_statement(ctx, rp_state, per_candidate, &signed_statement, persisted_validation_data).await?; // TODO [now]: if we get an Error::RejectedByProspectiveParachains, // we _do not_ distribute - it has been expunged. @@ -1471,12 +1556,16 @@ async fn kick_off_validation_work( } /// Import the statement and kick off validation work if it is a part of our assignment. 
+/// +/// If the statement type is `Seconded`, the `persisted_validation_data` must be +/// `Some`. #[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] async fn maybe_validate_and_import( ctx: &mut Context, state: &mut State, relay_parent: Hash, statement: SignedFullStatement, + persisted_validation_data: Option, metrics: &Metrics, ) -> Result<(), Error> { let rp_state = match state.per_relay_parent.get_mut(&relay_parent) { @@ -1494,7 +1583,7 @@ async fn maybe_validate_and_import( // TODO [now]: if we get an Error::RejectedByProspectiveParachains, // we will do nothing. - if let Some(summary) = import_statement(ctx, rp_state, &statement).await? { + if let Some(summary) = import_statement(ctx, rp_state, &mut state.per_candidate, &statement, persisted_validation_data).await? { // import_statement already takes care of communicating with the // prospective parachains subsystem. At this point, the candidate // has already been accepted into the fragment trees. @@ -1652,7 +1741,9 @@ async fn handle_statement_message( ) -> Result<(), Error> { let _timer = metrics.time_process_statement(); - match maybe_validate_and_import(ctx, state, relay_parent, statement, metrics).await { + // TODO [now]: get this from the message. + let persisted_validation_data: Option = unimplemented!(); + match maybe_validate_and_import(ctx, state, relay_parent, statement, persisted_validation_data, metrics).await { Err(Error::ValidationFailed(_)) => Ok(()), Err(e) => Err(e), Ok(()) => Ok(()), From 910b9972d09a079623b3eefe24a383b9fe5ea092 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 27 May 2022 20:48:52 -0500 Subject: [PATCH 24/54] always validate candidates exhaustively in backing. 
--- node/core/backing/src/error.rs | 17 +++- node/core/backing/src/lib.rs | 171 +++++++++++++++++++++++---------- 2 files changed, 131 insertions(+), 57 deletions(-) diff --git a/node/core/backing/src/error.rs b/node/core/backing/src/error.rs index c5efcea01b77..2e10e75dad4e 100644 --- a/node/core/backing/src/error.rs +++ b/node/core/backing/src/error.rs @@ -17,9 +17,9 @@ use fatality::Nested; use futures::channel::{mpsc, oneshot}; -use polkadot_node_subsystem::{messages::ValidationFailed, SubsystemError}; +use polkadot_node_subsystem::{messages::ValidationFailed, RuntimeApiError, SubsystemError}; use polkadot_node_subsystem_util::Error as UtilError; -use polkadot_primitives::v2::BackedCandidate; +use polkadot_primitives::v2::{BackedCandidate, ValidationCodeHash}; use crate::LOG_TARGET; @@ -42,6 +42,12 @@ pub enum Error { #[error("FetchPoV failed")] FetchPoV, + #[error("Fetching validation code by hash failed {0:?}, {1:?}")] + FetchValidationCode(ValidationCodeHash, RuntimeApiError), + + #[error("No validation code {0:?}")] + NoValidationCode(ValidationCodeHash), + #[error("Candidate rejected by prospective parachains subsystem")] RejectedByProspectiveParachains, @@ -49,12 +55,15 @@ pub enum Error { #[error("Failed to spawn background task")] FailedToSpawnBackgroundTask, - #[error("ValidateFromChainState channel closed before receipt")] - ValidateFromChainState(#[source] oneshot::Canceled), + #[error("ValidateFromExhaustive channel closed before receipt")] + ValidateFromExhaustive(#[source] oneshot::Canceled), #[error("StoreAvailableData channel closed before receipt")] StoreAvailableData(#[source] oneshot::Canceled), + #[error("RuntimeAPISubsystem channel closed before receipt")] + RuntimeApiUnavailable(#[source] oneshot::Canceled), + #[error("a channel was closed before receipt in try_join!")] JoinMultiple(#[source] oneshot::Canceled), diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index eb47deca9a64..664106787c03 100644 --- 
a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -87,8 +87,8 @@ use polkadot_node_subsystem::{ messages::{ AvailabilityDistributionMessage, AvailabilityStoreMessage, CandidateBackingMessage, CandidateValidationMessage, CollatorProtocolMessage, DisputeCoordinatorMessage, - ProspectiveParachainsMessage, ProvisionableData, ProvisionerMessage, RuntimeApiRequest, - StatementDistributionMessage, + ProspectiveParachainsMessage, ProvisionableData, ProvisionerMessage, RuntimeApiMessage, + RuntimeApiRequest, StatementDistributionMessage, }, overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemError, }; @@ -101,7 +101,7 @@ use polkadot_node_subsystem_util::{ use polkadot_primitives::v2::{ BackedCandidate, CandidateCommitments, CandidateHash, CandidateReceipt, CommittedCandidateReceipt, CoreIndex, CoreState, Hash, Id as ParaId, PersistedValidationData, - SessionIndex, SigningContext, ValidatorId, ValidatorIndex, ValidatorSignature, + SessionIndex, SigningContext, ValidationCode, ValidatorId, ValidatorIndex, ValidatorSignature, ValidityAttestation, }; use sp_keystore::SyncCryptoStorePtr; @@ -526,13 +526,10 @@ async fn make_pov_available( candidate_hash: CandidateHash, validation_data: PersistedValidationData, expected_erasure_root: Hash, - span: Option<&jaeger::Span>, ) -> Result, Error> { let available_data = AvailableData { pov, validation_data }; { - let _span = span.as_ref().map(|s| s.child("erasure-coding").with_candidate(candidate_hash)); - let chunks = erasure_coding::obtain_chunks_v1(n_validators, &available_data)?; let branches = erasure_coding::branches(chunks.as_ref()); @@ -544,8 +541,6 @@ async fn make_pov_available( } { - let _span = span.as_ref().map(|s| s.child("store-data").with_candidate(candidate_hash)); - store_available_data(sender, n_validators as u32, candidate_hash, available_data).await?; } @@ -576,14 +571,17 @@ async fn request_pov( async fn request_candidate_validation( sender: &mut impl 
overseer::CandidateBackingSenderTrait, + pvd: PersistedValidationData, + code: ValidationCode, candidate_receipt: CandidateReceipt, pov: Arc, ) -> Result { let (tx, rx) = oneshot::channel(); - // TODO [now]: always do exhaustive validation. sender - .send_message(CandidateValidationMessage::ValidateFromChainState( + .send_message(CandidateValidationMessage::ValidateFromExhaustive( + pvd, + code, candidate_receipt, pov, BACKING_EXECUTION_TIMEOUT, @@ -594,7 +592,7 @@ async fn request_candidate_validation( match rx.await { Ok(Ok(validation_result)) => Ok(validation_result), Ok(Err(err)) => Err(Error::ValidationFailed(err)), - Err(err) => Err(Error::ValidateFromChainState(err)), + Err(err) => Err(Error::ValidateFromExhaustive(err)), } } @@ -606,9 +604,9 @@ struct BackgroundValidationParams { tx_command: mpsc::Sender<(Hash, ValidatedCandidateCommand)>, candidate: CandidateReceipt, relay_parent: Hash, + persisted_validation_data: PersistedValidationData, pov: PoVData, n_validators: usize, - span: Option, make_command: F, } @@ -623,16 +621,33 @@ async fn validate_and_make_available( mut tx_command, candidate, relay_parent, + persisted_validation_data, pov, n_validators, - span, make_command, } = params; + let validation_code = { + let validation_code_hash = candidate.descriptor().validation_code_hash; + let (tx, rx) = oneshot::channel(); + sender + .send_message(RuntimeApiMessage::Request( + relay_parent, + RuntimeApiRequest::ValidationCodeByHash(validation_code_hash, tx), + )) + .await; + + let code = rx.await.map_err(Error::RuntimeApiUnavailable)?; + match code { + Err(e) => return Err(Error::FetchValidationCode(validation_code_hash, e)), + Ok(None) => return Err(Error::NoValidationCode(validation_code_hash)), + Ok(Some(c)) => c, + } + }; + let pov = match pov { PoVData::Ready(pov) => pov, - PoVData::FetchFromValidator { from_validator, candidate_hash, pov_hash } => { - let _span = span.as_ref().map(|s| s.child("request-pov")); + PoVData::FetchFromValidator { 
from_validator, candidate_hash, pov_hash } => match request_pov(&mut sender, relay_parent, from_validator, candidate_hash, pov_hash) .await { @@ -648,17 +663,18 @@ async fn validate_and_make_available( }, Err(err) => return Err(err), Ok(pov) => pov, - } - }, + }, }; let v = { - let _span = span.as_ref().map(|s| { - s.child("request-validation") - .with_pov(&pov) - .with_para_id(candidate.descriptor().para_id) - }); - request_candidate_validation(&mut sender, candidate.clone(), pov.clone()).await? + request_candidate_validation( + &mut sender, + persisted_validation_data, + validation_code, + candidate.clone(), + pov.clone(), + ) + .await? }; let res = match v { @@ -676,7 +692,6 @@ async fn validate_and_make_available( candidate.hash(), validation_data, candidate.descriptor.erasure_root, - span.as_ref(), ) .await?; @@ -1108,8 +1123,8 @@ async fn handle_validated_candidate_command( ?candidate_hash, "Missing `per_candidate` for seconded candidate.", ); - } - Some(p) => { p.seconded_locally = true } + }, + Some(p) => p.seconded_locally = true, } // TODO [now]: update seconded depths in active leaves. rp_state.issued_statements.insert(candidate_hash); @@ -1159,13 +1174,25 @@ async fn handle_validated_candidate_command( attesting.from_validator = index; let attesting = attesting.clone(); - kick_off_validation_work( - ctx, - rp_state, - &state.background_validation_tx, - attesting, - ) - .await?; + // The candidate state should be available because we've + // validated it before, the relay-parent is still around, + // and candidates are pruned on the basis of relay-parents. + // + // If it's not, then no point in validating it anyway. 
+ if let Some(pvd) = state + .per_candidate + .get(&candidate_hash) + .map(|pc| pc.persisted_validation_data.clone()) + { + kick_off_validation_work( + ctx, + rp_state, + pvd, + &state.background_validation_tx, + attesting, + ) + .await?; + } } } else { gum::warn!( @@ -1329,7 +1356,8 @@ async fn import_statement( candidate.clone(), pvd, tx, - )).await; + )) + .await; match rx.await { Err(oneshot::Canceled) => { @@ -1338,13 +1366,12 @@ async fn import_statement( "Could not reach the Prospective Parachains subsystem." ); - return Err(Error::RejectedByProspectiveParachains); - } - Ok(membership) => { + return Err(Error::RejectedByProspectiveParachains) + }, + Ok(membership) => if membership.is_empty() { - return Err(Error::RejectedByProspectiveParachains); - } - } + return Err(Error::RejectedByProspectiveParachains) + }, } } } @@ -1395,7 +1422,8 @@ async fn import_statement( ctx.send_message(ProspectiveParachainsMessage::CandidateBacked( para_id, candidate_hash, - )).await; + )) + .await; } // The provisioner waits on candidate-backing, which means @@ -1454,7 +1482,14 @@ async fn sign_import_and_distribute_statement( metrics: &Metrics, ) -> Result, Error> { if let Some(signed_statement) = sign_statement(&*rp_state, statement, keystore, metrics).await { - import_statement(ctx, rp_state, per_candidate, &signed_statement, persisted_validation_data).await?; + import_statement( + ctx, + rp_state, + per_candidate, + &signed_statement, + persisted_validation_data, + ) + .await?; // TODO [now]: if we get an Error::RejectedByProspectiveParachains, // we _do not_ distribute - it has been expunged. 
@@ -1510,6 +1545,7 @@ async fn background_validate_and_make_available( async fn kick_off_validation_work( ctx: &mut Context, rp_state: &mut PerRelayParentState, + persisted_validation_data: PersistedValidationData, background_validation_tx: &mpsc::Sender<(Hash, ValidatedCandidateCommand)>, attesting: AttestingData, ) -> Result<(), Error> { @@ -1534,10 +1570,6 @@ async fn kick_off_validation_work( pov_hash: attesting.pov_hash, }; - // TODO [now]: as we refactor validation to always take - // exhaustive parameters, this will need to change. - // - // Also, we will probably need to account for depth here, maybe. background_validate_and_make_available( ctx, rp_state, @@ -1546,9 +1578,9 @@ async fn kick_off_validation_work( tx_command: background_validation_tx.clone(), candidate: attesting.candidate, relay_parent: rp_state.parent, + persisted_validation_data, pov, n_validators: rp_state.table_context.validators.len(), - span: None, make_command: ValidatedCandidateCommand::Attest, }, ) @@ -1583,18 +1615,26 @@ async fn maybe_validate_and_import( // TODO [now]: if we get an Error::RejectedByProspectiveParachains, // we will do nothing. - if let Some(summary) = import_statement(ctx, rp_state, &mut state.per_candidate, &statement, persisted_validation_data).await? { + if let Some(summary) = import_statement( + ctx, + rp_state, + &mut state.per_candidate, + &statement, + persisted_validation_data, + ) + .await? + { // import_statement already takes care of communicating with the // prospective parachains subsystem. At this point, the candidate // has already been accepted into the fragment trees. 
+ let candidate_hash = summary.candidate; + if Some(summary.group_id) != rp_state.assignment { return Ok(()) } let attesting = match statement.payload() { Statement::Seconded(receipt) => { - let candidate_hash = summary.candidate; - let attesting = AttestingData { candidate: rp_state .table @@ -1630,7 +1670,22 @@ async fn maybe_validate_and_import( }, }; - kick_off_validation_work(ctx, rp_state, &state.background_validation_tx, attesting).await?; + // After `import_statement` succeeds, the candidate entry is guaranteed + // to exist. + if let Some(pvd) = state + .per_candidate + .get(&candidate_hash) + .map(|pc| pc.persisted_validation_data.clone()) + { + kick_off_validation_work( + ctx, + rp_state, + pvd, + &state.background_validation_tx, + attesting, + ) + .await?; + } } Ok(()) } @@ -1640,6 +1695,7 @@ async fn maybe_validate_and_import( async fn validate_and_second( ctx: &mut Context, rp_state: &mut PerRelayParentState, + persisted_validation_data: PersistedValidationData, candidate: &CandidateReceipt, pov: Arc, background_validation_tx: &mpsc::Sender<(Hash, ValidatedCandidateCommand)>, @@ -1662,9 +1718,9 @@ async fn validate_and_second( tx_command: background_validation_tx.clone(), candidate: candidate.clone(), relay_parent: rp_state.parent, + persisted_validation_data, pov: PoVData::Ready(pov), n_validators: rp_state.table_context.validators.len(), - span: None, make_command: ValidatedCandidateCommand::Second, }, ) @@ -1743,7 +1799,16 @@ async fn handle_statement_message( // TODO [now]: get this from the message. 
let persisted_validation_data: Option = unimplemented!(); - match maybe_validate_and_import(ctx, state, relay_parent, statement, persisted_validation_data, metrics).await { + match maybe_validate_and_import( + ctx, + state, + relay_parent, + statement, + persisted_validation_data, + metrics, + ) + .await + { Err(Error::ValidationFailed(_)) => Ok(()), Err(e) => Err(e), Ok(()) => Ok(()), From 19d7a43257790ea39cf44290448f5ff0c45bdb39 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 27 May 2022 20:55:46 -0500 Subject: [PATCH 25/54] return persisted_validation_data from validation --- node/core/backing/src/lib.rs | 38 +++++++++++++++++++++++++++--------- 1 file changed, 29 insertions(+), 9 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 664106787c03..783b619077da 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -159,9 +159,9 @@ impl std::fmt::Debug for ValidatedCandidateCommand { impl ValidatedCandidateCommand { fn candidate_hash(&self) -> CandidateHash { match *self { - ValidatedCandidateCommand::Second(Ok((ref candidate, _, _))) => candidate.hash(), + ValidatedCandidateCommand::Second(Ok(ref outputs)) => outputs.candidate.hash(), ValidatedCandidateCommand::Second(Err(ref candidate)) => candidate.hash(), - ValidatedCandidateCommand::Attest(Ok((ref candidate, _, _))) => candidate.hash(), + ValidatedCandidateCommand::Attest(Ok(ref outputs)) => outputs.candidate.hash(), ValidatedCandidateCommand::Attest(Err(ref candidate)) => candidate.hash(), ValidatedCandidateCommand::AttestNoPoV(candidate_hash) => candidate_hash, } @@ -282,6 +282,9 @@ struct State { /// backing is being enabled and complicates code complexity. per_relay_parent: HashMap, /// State tracked for all candidates relevant to the implicit view. 
+ /// + /// This is guaranteed to have an entry for each candidate with a relay parent in the implicit + /// or explicit view for which a `Seconded` statement has been successfully imported. per_candidate: HashMap, /// A cloneable sender which is dispatched to background candidate validation tasks to inform /// the main task of the result. @@ -596,8 +599,15 @@ async fn request_candidate_validation( } } +struct BackgroundValidationOutputs { + candidate: CandidateReceipt, + commitments: CandidateCommitments, + pov: Arc, + persisted_validation_data: PersistedValidationData, +} + type BackgroundValidationResult = - Result<(CandidateReceipt, CandidateCommitments, Arc), CandidateReceipt>; + Result; struct BackgroundValidationParams { sender: S, @@ -690,13 +700,18 @@ async fn validate_and_make_available( n_validators, pov.clone(), candidate.hash(), - validation_data, + validation_data.clone(), candidate.descriptor.erasure_root, ) .await?; match erasure_valid { - Ok(()) => Ok((candidate, commitments, pov.clone())), + Ok(()) => Ok(BackgroundValidationOutputs { + candidate, + commitments, + pov: pov.clone(), + persisted_validation_data: validation_data, + }), Err(InvalidErasureRoot) => { gum::debug!( target: LOG_TARGET, @@ -1083,7 +1098,14 @@ async fn handle_validated_candidate_command( match command { ValidatedCandidateCommand::Second(res) => match res { - Ok((candidate, commitments, _)) => { + Ok(outputs) => { + let BackgroundValidationOutputs { + candidate, + commitments, + persisted_validation_data, + .. + } = outputs; + // sanity check. // TODO [now]: this sanity check is almost certainly // outdated - we now allow seconding multiple candidates @@ -1103,14 +1125,12 @@ async fn handle_validated_candidate_command( // TODO [now]: if we get an Error::RejectedByProspectiveParachains, // then the statement has not been distributed. We need to handle this case - // TODO [now]: get this from changing `BackgroundValidationResult`. 
- let persisted_validation_data = unimplemented!(); if let Some(stmt) = sign_import_and_distribute_statement( ctx, rp_state, &mut state.per_candidate, statement, - persisted_validation_data, + Some(persisted_validation_data), state.keystore.clone(), metrics, ) From 7f24629faaf51c371f13c4d042a104173295f974 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 27 May 2022 21:08:34 -0500 Subject: [PATCH 26/54] handle rejections by prospective parachains --- node/core/backing/src/lib.rs | 57 ++++++++++++++++++++++++++---------- 1 file changed, 42 insertions(+), 15 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 783b619077da..e38d3056a88b 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -1122,10 +1122,11 @@ async fn handle_validated_candidate_command( commitments, }); - // TODO [now]: if we get an Error::RejectedByProspectiveParachains, - // then the statement has not been distributed. We need to handle this case - if let Some(stmt) = sign_import_and_distribute_statement( + // If we get an Error::RejectedByProspectiveParachains, + // then the statement has not been distributed or imported into + // the table. + let res = sign_import_and_distribute_statement( ctx, rp_state, &mut state.per_candidate, @@ -1133,9 +1134,27 @@ async fn handle_validated_candidate_command( Some(persisted_validation_data), state.keystore.clone(), metrics, - ) - .await? - { + ).await; + + if let Err(Error::RejectedByProspectiveParachains) = res { + let candidate_hash = candidate.hash(); + gum::debug!( + target: LOG_TARGET, + relay_parent = ?candidate.descriptor().relay_parent, + ?candidate_hash, + "Attempted to second candidate but was rejected by prospective parachains", + ); + + // Ensure the collator is reported. + ctx.send_message(CollatorProtocolMessage::Invalid( + candidate.descriptor().relay_parent, + candidate, + )).await; + + return Ok(()) + } + + if let Some(stmt) = res? 
{ match state.per_candidate.get_mut(&candidate_hash) { None => { gum::warn!( @@ -1511,9 +1530,6 @@ async fn sign_import_and_distribute_statement( ) .await?; - // TODO [now]: if we get an Error::RejectedByProspectiveParachains, - // we _do not_ distribute - it has been expunged. - // Propagate the error onwards. let smsg = StatementDistributionMessage::Share(rp_state.parent, signed_statement.clone()); ctx.send_unbounded_message(smsg); @@ -1633,17 +1649,28 @@ async fn maybe_validate_and_import( }, }; - // TODO [now]: if we get an Error::RejectedByProspectiveParachains, - // we will do nothing. - if let Some(summary) = import_statement( + + let res = import_statement( ctx, rp_state, &mut state.per_candidate, &statement, persisted_validation_data, - ) - .await? - { + ).await; + + // if we get an Error::RejectedByProspectiveParachains, + // we will do nothing. + if let Err(Error::RejectedByProspectiveParachains) = res { + gum::debug!( + target: LOG_TARGET, + ?relay_parent, + "Statement rejected by prospective parachains." + ); + + return Ok(()) + } + + if let Some(summary) = res? { // import_statement already takes care of communicating with the // prospective parachains subsystem. At this point, the candidate // has already been accepted into the fragment trees. 
From 22ead2634f52e2587350d4558d49554771842738 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sat, 28 May 2022 17:01:01 -0500 Subject: [PATCH 27/54] implement seconding sanity check --- node/core/backing/src/lib.rs | 235 ++++++++++++++++++++++++++--------- 1 file changed, 173 insertions(+), 62 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index e38d3056a88b..bc50580836d3 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -88,7 +88,7 @@ use polkadot_node_subsystem::{ AvailabilityDistributionMessage, AvailabilityStoreMessage, CandidateBackingMessage, CandidateValidationMessage, CollatorProtocolMessage, DisputeCoordinatorMessage, ProspectiveParachainsMessage, ProvisionableData, ProvisionerMessage, RuntimeApiMessage, - RuntimeApiRequest, StatementDistributionMessage, + RuntimeApiRequest, StatementDistributionMessage, HypotheticalDepthRequest, }, overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemError, }; @@ -1083,6 +1083,89 @@ async fn construct_per_relay_parent_state( })) } +enum SecondingAllowed { + No, + Yes(Vec<(Hash, Vec)>), +} + +/// Checks whether a candidate can be seconded based on its hypothetical +/// depths in the fragment tree and what we've already seconded in all +/// active leaves. +#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] +async fn seconding_sanity_check( + ctx: &mut Context, + active_leaves: &HashMap, + candidate_hash: CandidateHash, + candidate_para: ParaId, + parent_head_data_hash: Hash, + head_data_hash: Hash, + candidate_relay_parent: Hash, +) -> SecondingAllowed { + // Note that `GetHypotheticalDepths` doesn't account for recursion, + // i.e. candidates can appear at multiple depths in the tree and in fact + // at all depths, and we don't know what depths a candidate will ultimately occupy + // because that's dependent on other candidates we haven't yet received. 
+ // + // The only way to effectively rule this out is to have candidate receipts + // directly commit to the parachain block number or some other incrementing + // counter. That requires a major primitives format upgrade, so for now + // we just rule out trivial cycles. + if parent_head_data_hash == head_data_hash { + return SecondingAllowed::No + } + + let mut membership = Vec::new(); + let mut responses = FuturesOrdered::new(); + for (head, leaf_state) in active_leaves { + let (tx, rx) = oneshot::channel(); + ctx.send_message(ProspectiveParachainsMessage::GetHypotheticalDepth( + HypotheticalDepthRequest { + candidate_hash, + candidate_para, + parent_head_data_hash, + candidate_relay_parent, + fragment_tree_relay_parent: *head, + }, + tx, + )).await; + responses.push(rx.map_ok(move |depths| (depths, head, leaf_state))); + } + + for response in responses.next().await { + match response { + Err(oneshot::Canceled) => { + gum::warn!( + target: LOG_TARGET, + "Failed to reach prospective parachains subsystem for hypothetical depths", + ); + + return SecondingAllowed::No; + } + Ok((depths, head, leaf_state)) => { + for depth in &depths { + if leaf_state.seconded_at_depth.contains_key(&depth) { + gum::debug!( + target: LOG_TARGET, + ?candidate_hash, + depth, + leaf_hash = ?head, + "Refusing to second candidate at depth - already occupied." + ); + + return SecondingAllowed::No; + } + } + + membership.push((*head, depths)); + } + } + } + + // At this point we've checked the depths of the candidate against all active + // leaves. + SecondingAllowed::Yes(membership) +} + #[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] async fn handle_validated_candidate_command( ctx: &mut Context, @@ -1106,75 +1189,103 @@ async fn handle_validated_candidate_command( .. } = outputs; - // sanity check. - // TODO [now]: this sanity check is almost certainly - // outdated - we now allow seconding multiple candidates - // per relay-parent. 
update it to properly defend against - // seconding stuff wrongly. - // - // The way we'll do this is by asking the prospective parachains - // subsystem about the hypothetical depth of the candidate at all - // active leaves and then ensuring we've not seconded anything with - // those depths at any of our active leaves. - if !rp_state.issued_statements.contains(&candidate_hash) { - let statement = Statement::Seconded(CommittedCandidateReceipt { - descriptor: candidate.descriptor.clone(), - commitments, - }); - - - // If we get an Error::RejectedByProspectiveParachains, - // then the statement has not been distributed or imported into - // the table. - let res = sign_import_and_distribute_statement( - ctx, - rp_state, - &mut state.per_candidate, - statement, - Some(persisted_validation_data), - state.keystore.clone(), - metrics, - ).await; - - if let Err(Error::RejectedByProspectiveParachains) = res { - let candidate_hash = candidate.hash(); - gum::debug!( - target: LOG_TARGET, - relay_parent = ?candidate.descriptor().relay_parent, - ?candidate_hash, - "Attempted to second candidate but was rejected by prospective parachains", - ); - - // Ensure the collator is reported. - ctx.send_message(CollatorProtocolMessage::Invalid( - candidate.descriptor().relay_parent, - candidate, - )).await; - - return Ok(()) + if rp_state.issued_statements.contains(&candidate_hash) { + return Ok(()) + } + + // sanity check that we're allowed to second the candidate + // and that it doesn't conflict with other candidates we've + // seconded. 
+				let fragment_tree_membership = match seconding_sanity_check(
+					ctx,
+					&state.per_leaf,
+					candidate_hash,
+					candidate.descriptor().para_id,
+					persisted_validation_data.parent_head.hash(),
+					commitments.head_data.hash(),
+					candidate.descriptor().relay_parent,
+				).await {
+					SecondingAllowed::No => return Ok(()),
+					SecondingAllowed::Yes(membership) => membership,
+				};
+
+				let statement = Statement::Seconded(CommittedCandidateReceipt {
+					descriptor: candidate.descriptor.clone(),
+					commitments,
+				});
+
+
+				// If we get an Error::RejectedByProspectiveParachains,
+				// then the statement has not been distributed or imported into
+				// the table.
+				let res = sign_import_and_distribute_statement(
+					ctx,
+					rp_state,
+					&mut state.per_candidate,
+					statement,
+					Some(persisted_validation_data),
+					state.keystore.clone(),
+					metrics,
+				).await;
+
+				if let Err(Error::RejectedByProspectiveParachains) = res {
+					let candidate_hash = candidate.hash();
+					gum::debug!(
+						target: LOG_TARGET,
+						relay_parent = ?candidate.descriptor().relay_parent,
+						?candidate_hash,
+						"Attempted to second candidate but was rejected by prospective parachains",
+					);
+
+					// Ensure the collator is reported.
+					ctx.send_message(CollatorProtocolMessage::Invalid(
+						candidate.descriptor().relay_parent,
+						candidate,
+					)).await;
+
+					return Ok(())
+				}
+
+				if let Some(stmt) = res? {
+					match state.per_candidate.get_mut(&candidate_hash) {
+						None => {
+							gum::warn!(
+								target: LOG_TARGET,
+								?candidate_hash,
+								"Missing `per_candidate` for seconded candidate.",
+							);
+						},
+						Some(p) => p.seconded_locally = true,
+					}

-					if let Some(stmt) = res? {
-						match state.per_candidate.get_mut(&candidate_hash) {
+					// update seconded depths in active leaves.
+					for (leaf, depths) in fragment_tree_membership {
+						let leaf_data = match state.per_leaf.get_mut(&leaf) {
 							None => {
 								gum::warn!(
 									target: LOG_TARGET,
-									?candidate_hash,
-									"Missing `per_candidate` for seconded candidate.",
+									leaf_hash = ?leaf,
+									"Missing `per_leaf` for known active leaf."
); - }, - Some(p) => p.seconded_locally = true, + + continue + } + Some(d) => d, + }; + + for depth in depths { + leaf_data.seconded_at_depth.insert(depth, candidate_hash); } - // TODO [now]: update seconded depths in active leaves. - rp_state.issued_statements.insert(candidate_hash); - - metrics.on_candidate_seconded(); - ctx.send_message(CollatorProtocolMessage::Seconded( - rp_state.parent, - stmt, - )) - .await; } + + rp_state.issued_statements.insert(candidate_hash); + + metrics.on_candidate_seconded(); + ctx.send_message(CollatorProtocolMessage::Seconded( + rp_state.parent, + stmt, + )) + .await; } }, Err(candidate) => { From 6738ee9cb3dab1f4fe4f4f296a44f325dcb209f5 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sat, 28 May 2022 17:06:07 -0500 Subject: [PATCH 28/54] invoke validate_and_second --- node/core/backing/src/lib.rs | 32 +++++++++++++++++++------------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index bc50580836d3..95584de4d34b 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -1927,20 +1927,26 @@ async fn handle_second_message( } // If the message is a `CandidateBackingMessage::Second`, sign and dispatch a - // Seconded statement only if we have not seconded any other candidate and - // have not signed a Valid statement for the requested candidate. + // Seconded statement only if we have not signed a Valid statement for the requested candidate. // - // TODO [now]: this check is outdated. we need to only second when we have seconded - // nothing else with the hypothetical depth of the candidate in all our active leaves. - - // if self.seconded.is_none() { - // // This job has not seconded a candidate yet. 
- - // if !self.issued_statements.contains(&candidate_hash) { - // let pov = Arc::new(pov); - // self.validate_and_second(&span, &root_span, ctx, &candidate, pov).await?; - // } - // } + // The actual logic of issuing the signed statement checks that this isn't + // conflicting with other seconded candidates. Not doing that check here + // gives other subsystems the ability to get us to execute arbitrary candidates, + // but no more. + if !rp_state.issued_statements.contains(&candidate_hash) { + let pov = Arc::new(pov); + + // TODO [now]: get from message. + let persisted_validation_data = unimplemented!(); + validate_and_second( + ctx, + rp_state, + persisted_validation_data, + &candidate, + pov, + &state.background_validation_tx, + ).await?; + } Ok(()) } From c54e1ca603ba56c4eac3ff5a4ceb1ce867ea4518 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sat, 28 May 2022 17:23:58 -0500 Subject: [PATCH 29/54] Alter statement table to allow multiple seconded messages per validator --- node/core/backing/src/lib.rs | 14 ++--- statement-table/src/generic.rs | 97 ++++++++++++++++++++++++++-------- statement-table/src/lib.rs | 2 +- 3 files changed, 83 insertions(+), 30 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 95584de4d34b..5bd69069dd99 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -111,7 +111,7 @@ use statement_table::{ SignedStatement as TableSignedStatement, Statement as TableStatement, Summary as TableSummary, }, - Context as TableContextTrait, Table, + Config as TableConfig, Context as TableContextTrait, Table, }; mod error; @@ -1061,6 +1061,12 @@ async fn construct_per_relay_parent_state( } let table_context = TableContext { groups, validators, validator }; + let table_config = TableConfig { + allow_multiple_seconded: match mode { + ProspectiveParachainsMode::Enabled => true, + ProspectiveParachainsMode::Disabled => false, + }, + }; // TODO [now]: I've removed the 
`required_collator` more broadly, // because it's not used in practice and was intended for parathreads. @@ -1075,7 +1081,7 @@ async fn construct_per_relay_parent_state( session_index, assignment, backed: HashSet::new(), - table: Table::default(), + table: Table::new(table_config), table_context, issued_statements: HashSet::new(), awaiting_validation: HashSet::new(), @@ -1544,10 +1550,6 @@ async fn import_statement( let stmt = primitive_statement_to_table(statement); - // TODO [now]: we violate the pre-existing checks that each validator may - // only second one candidate. - // - // We will need to address this so we don't get errors incorrectly. let summary = rp_state.table.import_statement(&rp_state.table_context, stmt); if let Some(attested) = summary diff --git a/statement-table/src/generic.rs b/statement-table/src/generic.rs index d899c54d1d53..8d7838dd32d9 100644 --- a/statement-table/src/generic.rs +++ b/statement-table/src/generic.rs @@ -61,6 +61,14 @@ pub trait Context { fn requisite_votes(&self, group: &Self::GroupId) -> usize; } +/// Table configuration. +pub struct Config { + /// When this is true, the table will allow multiple seconded candidates + /// per authority. This flag means that higher-level code is responsible for + /// bounding the number of candidates. + pub allow_multiple_seconded: bool, +} + /// Statements circulated among peers. 
#[derive(PartialEq, Eq, Debug, Clone, Encode, Decode)] pub enum Statement { @@ -270,12 +278,12 @@ impl CandidateData { // authority metadata struct AuthorityData { - proposal: Option<(Ctx::Digest, Ctx::Signature)>, + proposals: Vec<(Ctx::Digest, Ctx::Signature)>, } impl Default for AuthorityData { fn default() -> Self { - AuthorityData { proposal: None } + AuthorityData { proposals: Vec::new() } } } @@ -290,19 +298,20 @@ pub struct Table { authority_data: HashMap>, detected_misbehavior: HashMap>>, candidate_votes: HashMap>, + config: Config, } -impl Default for Table { - fn default() -> Self { +impl Table { + /// Create a new `Table` from a `Config`. + pub fn new(config: Config) -> Self { Table { - authority_data: HashMap::new(), - detected_misbehavior: HashMap::new(), - candidate_votes: HashMap::new(), + authority_data: HashMap::default(), + detected_misbehavior: HashMap::default(), + candidate_votes: HashMap::default(), + config, } } -} -impl Table { /// Get the attested candidate for `digest`. /// /// Returns `Some(_)` if the candidate exists and is includable. @@ -393,7 +402,9 @@ impl Table { // note misbehavior. 
let existing = occ.get_mut(); - if let Some((ref old_digest, ref old_sig)) = existing.proposal { + if !self.config.allow_multiple_seconded && existing.proposals.len() == 1 { + let &(ref old_digest, ref old_sig) = &existing.proposals[0]; + if old_digest != &digest { const EXISTENCE_PROOF: &str = "when proposal first received from authority, candidate \ @@ -413,15 +424,17 @@ impl Table { })) } + false + } else if self.config.allow_multiple_seconded && existing.proposals.iter().find(|(ref od, _)| od == &digest).is_some() { false } else { - existing.proposal = Some((digest.clone(), signature.clone())); + existing.proposals.push((digest.clone(), signature.clone())); true } }, Entry::Vacant(vacant) => { vacant - .insert(AuthorityData { proposal: Some((digest.clone(), signature.clone())) }); + .insert(AuthorityData { proposals: vec![(digest.clone(), signature.clone())] }); true }, }; @@ -571,8 +584,16 @@ mod tests { use super::*; use std::collections::HashMap; - fn create() -> Table { - Table::default() + fn create_single_seconded() -> Table { + Table::new(Config { + allow_multiple_seconded: false, + }) + } + + fn create_many_seconded() -> Table { + Table::new(Config { + allow_multiple_seconded: true, + }) } #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] @@ -630,7 +651,7 @@ mod tests { } #[test] - fn submitting_two_candidates_is_misbehavior() { + fn submitting_two_candidates_can_be_misbehavior() { let context = TestContext { authorities: { let mut map = HashMap::new(); @@ -639,7 +660,7 @@ mod tests { }, }; - let mut table = create(); + let mut table = create_single_seconded(); let statement_a = SignedStatement { statement: Statement::Seconded(Candidate(2, 100)), signature: Signature(1), @@ -665,6 +686,36 @@ mod tests { ); } + #[test] + fn submitting_two_candidates_can_be_allowed() { + let context = TestContext { + authorities: { + let mut map = HashMap::new(); + map.insert(AuthorityId(1), GroupId(2)); + map + }, + }; + + let mut table = create_many_seconded(); + let 
statement_a = SignedStatement { + statement: Statement::Seconded(Candidate(2, 100)), + signature: Signature(1), + sender: AuthorityId(1), + }; + + let statement_b = SignedStatement { + statement: Statement::Seconded(Candidate(2, 999)), + signature: Signature(1), + sender: AuthorityId(1), + }; + + table.import_statement(&context, statement_a); + assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1))); + + table.import_statement(&context, statement_b); + assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1))); + } + #[test] fn submitting_candidate_from_wrong_group_is_misbehavior() { let context = TestContext { @@ -675,7 +726,7 @@ mod tests { }, }; - let mut table = create(); + let mut table = create_single_seconded(); let statement = SignedStatement { statement: Statement::Seconded(Candidate(2, 100)), signature: Signature(1), @@ -707,7 +758,7 @@ mod tests { }, }; - let mut table = create(); + let mut table = create_single_seconded(); let candidate_a = SignedStatement { statement: Statement::Seconded(Candidate(2, 100)), @@ -751,7 +802,7 @@ mod tests { }, }; - let mut table = create(); + let mut table = create_single_seconded(); let statement = SignedStatement { statement: Statement::Seconded(Candidate(2, 100)), signature: Signature(1), @@ -781,7 +832,7 @@ mod tests { }, }; - let mut table = create(); + let mut table = create_single_seconded(); let statement = SignedStatement { statement: Statement::Seconded(Candidate(2, 100)), signature: Signature(1), @@ -849,7 +900,7 @@ mod tests { }; // have 2/3 validity guarantors note validity. 
- let mut table = create(); + let mut table = create_single_seconded(); let statement = SignedStatement { statement: Statement::Seconded(Candidate(2, 100)), signature: Signature(1), @@ -883,7 +934,7 @@ mod tests { }, }; - let mut table = create(); + let mut table = create_single_seconded(); let statement = SignedStatement { statement: Statement::Seconded(Candidate(2, 100)), signature: Signature(1), @@ -910,7 +961,7 @@ mod tests { }, }; - let mut table = create(); + let mut table = create_single_seconded(); let statement = SignedStatement { statement: Statement::Seconded(Candidate(2, 100)), signature: Signature(1), diff --git a/statement-table/src/lib.rs b/statement-table/src/lib.rs index a3fbbb1fdaaa..3bd586f09da9 100644 --- a/statement-table/src/lib.rs +++ b/statement-table/src/lib.rs @@ -16,7 +16,7 @@ pub mod generic; -pub use generic::{Context, Table}; +pub use generic::{Config, Context, Table}; /// Concrete instantiations suitable for v2 primitives. pub mod v2 { From e855b6cf825f56a878a1d3793b7d421772cb9c68 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 29 May 2022 21:50:27 -0500 Subject: [PATCH 30/54] refactor backing to have statements carry PVD --- node/core/backing/src/lib.rs | 159 ++++++++++++--------------- node/primitives/src/disputes/mod.rs | 26 +++-- node/primitives/src/lib.rs | 77 +++++++++++++ node/subsystem-types/src/messages.rs | 12 +- primitives/src/v2/signed.rs | 29 ++++- statement-table/src/generic.rs | 12 +- 6 files changed, 200 insertions(+), 115 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 5bd69069dd99..a66a4b40e646 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -79,16 +79,17 @@ use futures::{ use error::{Error, FatalResult}; use polkadot_node_primitives::{ - AvailableData, InvalidCandidate, PoV, SignedDisputeStatement, SignedFullStatement, Statement, - ValidationResult, BACKING_EXECUTION_TIMEOUT, + AvailableData, InvalidCandidate, PoV, 
SignedDisputeStatement, SignedFullStatement, + SignedFullStatementWithPVD, Statement, StatementWithPVD, ValidationResult, + BACKING_EXECUTION_TIMEOUT, }; use polkadot_node_subsystem::{ jaeger, messages::{ AvailabilityDistributionMessage, AvailabilityStoreMessage, CandidateBackingMessage, CandidateValidationMessage, CollatorProtocolMessage, DisputeCoordinatorMessage, - ProspectiveParachainsMessage, ProvisionableData, ProvisionerMessage, RuntimeApiMessage, - RuntimeApiRequest, StatementDistributionMessage, HypotheticalDepthRequest, + HypotheticalDepthRequest, ProspectiveParachainsMessage, ProvisionableData, + ProvisionerMessage, RuntimeApiMessage, RuntimeApiRequest, StatementDistributionMessage, }, overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemError, }; @@ -435,10 +436,10 @@ struct InvalidErasureRoot; // It looks like it's not possible to do an `impl From` given the current state of // the code. So this does the necessary conversion. -fn primitive_statement_to_table(s: &SignedFullStatement) -> TableSignedStatement { +fn primitive_statement_to_table(s: &SignedFullStatementWithPVD) -> TableSignedStatement { let statement = match s.payload() { - Statement::Seconded(c) => TableStatement::Seconded(c.clone()), - Statement::Valid(h) => TableStatement::Valid(h.clone()), + StatementWithPVD::Seconded(c, _) => TableStatement::Seconded(c.clone()), + StatementWithPVD::Valid(h) => TableStatement::Valid(h.clone()), }; TableSignedStatement { @@ -606,8 +607,7 @@ struct BackgroundValidationOutputs { persisted_validation_data: PersistedValidationData, } -type BackgroundValidationResult = - Result; +type BackgroundValidationResult = Result; struct BackgroundValidationParams { sender: S, @@ -756,8 +756,8 @@ async fn handle_communication( metrics: &Metrics, ) -> Result<(), Error> { match message { - CandidateBackingMessage::Second(_relay_parent, candidate, pov) => { - handle_second_message(ctx, state, candidate, pov, metrics).await?; + 
CandidateBackingMessage::Second(_relay_parent, candidate, pvd, pov) => { + handle_second_message(ctx, state, candidate, pvd, pov, metrics).await?; }, CandidateBackingMessage::Statement(relay_parent, statement) => { handle_statement_message(ctx, state, relay_parent, statement, metrics).await?; @@ -1133,7 +1133,8 @@ async fn seconding_sanity_check( fragment_tree_relay_parent: *head, }, tx, - )).await; + )) + .await; responses.push(rx.map_ok(move |depths| (depths, head, leaf_state))); } @@ -1145,8 +1146,8 @@ async fn seconding_sanity_check( "Failed to reach prospective parachains subsystem for hypothetical depths", ); - return SecondingAllowed::No; - } + return SecondingAllowed::No + }, Ok((depths, head, leaf_state)) => { for depth in &depths { if leaf_state.seconded_at_depth.contains_key(&depth) { @@ -1158,12 +1159,12 @@ async fn seconding_sanity_check( "Refusing to second candidate at depth - already occupied." ); - return SecondingAllowed::No; + return SecondingAllowed::No } } membership.push((*head, depths)); - } + }, } } @@ -1210,16 +1211,20 @@ async fn handle_validated_candidate_command( persisted_validation_data.parent_head.hash(), candidate.descriptor().relay_parent, commitments.head_data.hash(), - ).await { + ) + .await + { SecondingAllowed::No => return Ok(()), SecondingAllowed::Yes(membership) => membership, }; - let statement = Statement::Seconded(CommittedCandidateReceipt { - descriptor: candidate.descriptor.clone(), - commitments, - }); - + let statement = StatementWithPVD::Seconded( + CommittedCandidateReceipt { + descriptor: candidate.descriptor.clone(), + commitments, + }, + persisted_validation_data, + ); // If we get an Error::RejectedByProspectiveParachains, // then the statement has not been distributed or imported into @@ -1229,10 +1234,10 @@ async fn handle_validated_candidate_command( rp_state, &mut state.per_candidate, statement, - Some(persisted_validation_data), state.keystore.clone(), metrics, - ).await; + ) + .await; if let 
Err(Error::RejectedByProspectiveParachains) = res { let candidate_hash = candidate.hash(); @@ -1247,7 +1252,8 @@ async fn handle_validated_candidate_command( ctx.send_message(CollatorProtocolMessage::Invalid( candidate.descriptor().relay_parent, candidate, - )).await; + )) + .await; return Ok(()) } @@ -1275,7 +1281,7 @@ async fn handle_validated_candidate_command( ); continue - } + }, Some(d) => d, }; @@ -1289,7 +1295,7 @@ async fn handle_validated_candidate_command( metrics.on_candidate_seconded(); ctx.send_message(CollatorProtocolMessage::Seconded( rp_state.parent, - stmt, + StatementWithPVD::drop_pvd_from_signed(stmt), )) .await; } @@ -1308,14 +1314,13 @@ async fn handle_validated_candidate_command( // sanity check. if !rp_state.issued_statements.contains(&candidate_hash) { if res.is_ok() { - let statement = Statement::Valid(candidate_hash); + let statement = StatementWithPVD::Valid(candidate_hash); sign_import_and_distribute_statement( ctx, rp_state, &mut state.per_candidate, statement, - None, // only needed when seconding. state.keystore.clone(), metrics, ) @@ -1371,10 +1376,10 @@ async fn handle_validated_candidate_command( async fn sign_statement( rp_state: &PerRelayParentState, - statement: Statement, + statement: StatementWithPVD, keystore: SyncCryptoStorePtr, metrics: &Metrics, -) -> Option { +) -> Option { let signed = rp_state .table_context .validator @@ -1404,7 +1409,7 @@ async fn dispatch_new_statement_to_dispute_coordinator( ctx: &mut Context, rp_state: &PerRelayParentState, candidate_hash: CandidateHash, - statement: &SignedFullStatement, + statement: &SignedFullStatementWithPVD, ) -> Result<(), ValidatorIndexOutOfBounds> { // Dispatch the statement to the dispute coordinator. 
let validator_index = statement.validator_index(); @@ -1417,8 +1422,8 @@ async fn dispatch_new_statement_to_dispute_coordinator( }; let maybe_candidate_receipt = match statement.payload() { - Statement::Seconded(receipt) => Some(receipt.to_plain()), - Statement::Valid(candidate_hash) => { + StatementWithPVD::Seconded(receipt, _) => Some(receipt.to_plain()), + StatementWithPVD::Valid(candidate_hash) => { // Valid statements are only supposed to be imported // once we've seen at least one `Seconded` statement. rp_state.table.get_candidate(&candidate_hash).map(|c| c.to_plain()) @@ -1462,8 +1467,7 @@ async fn import_statement( ctx: &mut Context, rp_state: &mut PerRelayParentState, per_candidate: &mut HashMap, - statement: &SignedFullStatement, - persisted_validation_data: Option, + statement: &SignedFullStatementWithPVD, ) -> Result, Error> { gum::debug!( target: LOG_TARGET, @@ -1487,13 +1491,8 @@ async fn import_statement( // // We should also not accept any candidates which have no valid depths under any of // our active leaves. 
- if let Statement::Seconded(candidate) = statement.payload() { + if let StatementWithPVD::Seconded(candidate, pvd) = statement.payload() { if !per_candidate.contains_key(&candidate_hash) { - let pvd = match persisted_validation_data { - None => return Err(Error::RejectedByProspectiveParachains), - Some(pvd) => pvd, - }; - per_candidate.insert( candidate_hash, PerCandidateState { @@ -1510,7 +1509,7 @@ async fn import_statement( ctx.send_message(ProspectiveParachainsMessage::CandidateSeconded( candidate.descriptor().para_id, candidate.clone(), - pvd, + pvd.clone(), tx, )) .await; @@ -1534,7 +1533,7 @@ async fn import_statement( } if let Err(ValidatorIndexOutOfBounds) = - dispatch_new_statement_to_dispute_coordinator(ctx, rp_state, candidate_hash, &statement) + dispatch_new_statement_to_dispute_coordinator(ctx, rp_state, candidate_hash, statement) .await { gum::warn!( @@ -1620,30 +1619,22 @@ fn issue_new_misbehaviors( } /// Sign, import, and distribute a statement. -/// -/// If the statement is a `Seconded` statement, the `persisted_validation_data` -/// must be `Some`. 
#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] async fn sign_import_and_distribute_statement( ctx: &mut Context, rp_state: &mut PerRelayParentState, per_candidate: &mut HashMap, - statement: Statement, - persisted_validation_data: Option, + statement: StatementWithPVD, keystore: SyncCryptoStorePtr, metrics: &Metrics, -) -> Result, Error> { +) -> Result, Error> { if let Some(signed_statement) = sign_statement(&*rp_state, statement, keystore, metrics).await { - import_statement( - ctx, - rp_state, - per_candidate, - &signed_statement, - persisted_validation_data, - ) - .await?; + import_statement(ctx, rp_state, per_candidate, &signed_statement).await?; - let smsg = StatementDistributionMessage::Share(rp_state.parent, signed_statement.clone()); + let smsg = StatementDistributionMessage::Share( + rp_state.parent, + StatementWithPVD::drop_pvd_from_signed(signed_statement.clone()), + ); ctx.send_unbounded_message(smsg); Ok(Some(signed_statement)) @@ -1737,16 +1728,12 @@ async fn kick_off_validation_work( } /// Import the statement and kick off validation work if it is a part of our assignment. -/// -/// If the statement type is `Seconded`, the `persisted_validation_data` must be -/// `Some`. #[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] async fn maybe_validate_and_import( ctx: &mut Context, state: &mut State, relay_parent: Hash, - statement: SignedFullStatement, - persisted_validation_data: Option, + statement: SignedFullStatementWithPVD, metrics: &Metrics, ) -> Result<(), Error> { let rp_state = match state.per_relay_parent.get_mut(&relay_parent) { @@ -1762,14 +1749,7 @@ async fn maybe_validate_and_import( }, }; - - let res = import_statement( - ctx, - rp_state, - &mut state.per_candidate, - &statement, - persisted_validation_data, - ).await; + let res = import_statement(ctx, rp_state, &mut state.per_candidate, &statement).await; // if we get an Error::RejectedByProspectiveParachains, // we will do nothing. 
@@ -1794,7 +1774,7 @@ async fn maybe_validate_and_import( return Ok(()) } let attesting = match statement.payload() { - Statement::Seconded(receipt) => { + StatementWithPVD::Seconded(receipt, _) => { let attesting = AttestingData { candidate: rp_state .table @@ -1808,7 +1788,7 @@ async fn maybe_validate_and_import( rp_state.fallbacks.insert(summary.candidate, attesting.clone()); attesting }, - Statement::Valid(candidate_hash) => { + StatementWithPVD::Valid(candidate_hash) => { if let Some(attesting) = rp_state.fallbacks.get_mut(candidate_hash) { let our_index = rp_state.table_context.validator.as_ref().map(|v| v.index()); if our_index == Some(statement.validator_index()) { @@ -1894,6 +1874,7 @@ async fn handle_second_message( ctx: &mut Context, state: &mut State, candidate: CandidateReceipt, + persisted_validation_data: PersistedValidationData, pov: PoV, metrics: &Metrics, ) -> Result<(), Error> { @@ -1902,6 +1883,16 @@ async fn handle_second_message( let candidate_hash = candidate.hash(); let relay_parent = candidate.descriptor().relay_parent; + if candidate.descriptor().persisted_validation_data_hash != persisted_validation_data.hash() { + gum::warn!( + target: LOG_TARGET, + ?candidate_hash, + "Candidate backing was asked to second candidate with wrong PVD", + ); + + return Ok(()) + } + let rp_state = match state.per_relay_parent.get_mut(&relay_parent) { None => { gum::trace!( @@ -1938,8 +1929,6 @@ async fn handle_second_message( if !rp_state.issued_statements.contains(&candidate_hash) { let pov = Arc::new(pov); - // TODO [now]: get from message. 
- let persisted_validation_data = unimplemented!(); validate_and_second( ctx, rp_state, @@ -1947,7 +1936,8 @@ async fn handle_second_message( &candidate, pov, &state.background_validation_tx, - ).await?; + ) + .await?; } Ok(()) @@ -1958,23 +1948,12 @@ async fn handle_statement_message( ctx: &mut Context, state: &mut State, relay_parent: Hash, - statement: SignedFullStatement, + statement: SignedFullStatementWithPVD, metrics: &Metrics, ) -> Result<(), Error> { let _timer = metrics.time_process_statement(); - // TODO [now]: get this from the message. - let persisted_validation_data: Option = unimplemented!(); - match maybe_validate_and_import( - ctx, - state, - relay_parent, - statement, - persisted_validation_data, - metrics, - ) - .await - { + match maybe_validate_and_import(ctx, state, relay_parent, statement, metrics).await { Err(Error::ValidationFailed(_)) => Ok(()), Err(e) => Err(e), Ok(()) => Ok(()), diff --git a/node/primitives/src/disputes/mod.rs b/node/primitives/src/disputes/mod.rs index 4b2d636dc10e..01293d2b64f0 100644 --- a/node/primitives/src/disputes/mod.rs +++ b/node/primitives/src/disputes/mod.rs @@ -19,10 +19,10 @@ use parity_scale_codec::{Decode, Encode}; use sp_application_crypto::AppKey; use sp_keystore::{CryptoStore, Error as KeystoreError, SyncCryptoStorePtr}; -use super::{Statement, UncheckedSignedFullStatement}; use polkadot_primitives::v2::{ - CandidateHash, CandidateReceipt, DisputeStatement, InvalidDisputeStatementKind, SessionIndex, - SigningContext, ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorSignature, + CandidateHash, CandidateReceipt, CompactStatement, DisputeStatement, EncodeAs, + InvalidDisputeStatementKind, SessionIndex, SigningContext, UncheckedSigned, + ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorSignature, }; /// `DisputeMessage` and related types. @@ -174,19 +174,23 @@ impl SignedDisputeStatement { /// along with the signing context. 
/// /// This does signature checks again with the data provided. - pub fn from_backing_statement( - backing_statement: &UncheckedSignedFullStatement, + pub fn from_backing_statement( + backing_statement: &UncheckedSigned, signing_context: SigningContext, validator_public: ValidatorId, - ) -> Result { - let (statement_kind, candidate_hash) = match backing_statement.unchecked_payload() { - Statement::Seconded(candidate) => ( + ) -> Result + where + for<'a> &'a T: Into, + T: EncodeAs, + { + let (statement_kind, candidate_hash) = match backing_statement.unchecked_payload().into() { + CompactStatement::Seconded(candidate_hash) => ( ValidDisputeStatementKind::BackingSeconded(signing_context.parent_hash), - candidate.hash(), + candidate_hash, ), - Statement::Valid(candidate_hash) => ( + CompactStatement::Valid(candidate_hash) => ( ValidDisputeStatementKind::BackingValid(signing_context.parent_hash), - *candidate_hash, + candidate_hash, ), }; diff --git a/node/primitives/src/lib.rs b/node/primitives/src/lib.rs index c203e560647d..ffb08281170d 100644 --- a/node/primitives/src/lib.rs +++ b/node/primitives/src/lib.rs @@ -196,6 +196,76 @@ impl EncodeAs for Statement { } } +/// A statement, exactly the same as [`Statement`] but where seconded messages carry +/// the [`PersistedValidationData`]. +#[derive(Clone, PartialEq, Eq)] +pub enum StatementWithPVD { + /// A statement that a validator seconds a candidate. + Seconded(CommittedCandidateReceipt, PersistedValidationData), + /// A statement that a validator has deemed a candidate valid. + Valid(CandidateHash), +} + +impl std::fmt::Debug for StatementWithPVD { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + StatementWithPVD::Seconded(seconded, _) => + write!(f, "Seconded: {:?}", seconded.descriptor), + StatementWithPVD::Valid(hash) => write!(f, "Valid: {:?}", hash), + } + } +} + +impl StatementWithPVD { + /// Get the candidate hash referenced by this statement. 
+ /// + /// If this is a `Statement::Seconded`, this does hash the candidate receipt, which may be expensive + /// for large candidates. + pub fn candidate_hash(&self) -> CandidateHash { + match *self { + StatementWithPVD::Valid(ref h) => *h, + StatementWithPVD::Seconded(ref c, _) => c.hash(), + } + } + + /// Transform this statement into its compact version, which references only the hash + /// of the candidate. + pub fn to_compact(&self) -> CompactStatement { + match *self { + StatementWithPVD::Seconded(ref c, _) => CompactStatement::Seconded(c.hash()), + StatementWithPVD::Valid(hash) => CompactStatement::Valid(hash), + } + } + + /// Drop the [`PersistedValidationData`] from the statement. + pub fn drop_pvd(self) -> Statement { + match self { + StatementWithPVD::Seconded(c, _) => Statement::Seconded(c), + StatementWithPVD::Valid(c_h) => Statement::Valid(c_h), + } + } + + /// Drop the [`PersistedValidationData`] from the statement in a signed + /// variant. + pub fn drop_pvd_from_signed(signed: SignedFullStatementWithPVD) -> SignedFullStatement { + signed + .convert_to_superpayload_with(|s| s.drop_pvd()) + .expect("persisted_validation_data doesn't affect encoded_as; qed") + } +} + +impl From<&'_ StatementWithPVD> for CompactStatement { + fn from(stmt: &StatementWithPVD) -> Self { + stmt.to_compact() + } +} + +impl EncodeAs for StatementWithPVD { + fn encode_as(&self) -> Vec { + self.to_compact().encode() + } +} + /// A statement, the corresponding signature, and the index of the sender. /// /// Signing context and validator set should be apparent from context. @@ -207,6 +277,13 @@ pub type SignedFullStatement = Signed; /// Variant of `SignedFullStatement` where the signature has not yet been verified. pub type UncheckedSignedFullStatement = UncheckedSigned; +/// A statement, the corresponding signature, and the index of the sender. 
+/// +/// Seconded statements are accompanied by the [`PersistedValidationData`] +/// +/// Signing context and validator set should be apparent from context. +pub type SignedFullStatementWithPVD = Signed; + /// Candidate invalidity details #[derive(Debug)] pub enum InvalidCandidate { diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index 1c5e2e3381e5..c673f679b7fd 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -36,7 +36,7 @@ use polkadot_node_primitives::{ approval::{BlockApprovalMeta, IndirectAssignmentCert, IndirectSignedApprovalVote}, AvailableData, BabeEpoch, BlockWeight, CandidateVotes, CollationGenerationConfig, CollationSecondedSignal, DisputeMessage, ErasureChunk, PoV, SignedDisputeStatement, - SignedFullStatement, ValidationResult, + SignedFullStatement, SignedFullStatementWithPVD, ValidationResult, }; use polkadot_primitives::{ v2::{ @@ -75,17 +75,17 @@ pub enum CandidateBackingMessage { GetBackedCandidates(Hash, Vec, oneshot::Sender>), /// Note that the Candidate Backing subsystem should second the given candidate in the context of the /// given relay-parent (ref. by hash). This candidate must be validated. - Second(Hash, CandidateReceipt, PoV), - /// Note a validator's statement about a particular candidate. Disagreements about validity must be escalated - /// to a broader check by Misbehavior Arbitration. Agreements are simply tallied until a quorum is reached. - Statement(Hash, SignedFullStatement), + Second(Hash, CandidateReceipt, PersistedValidationData, PoV), + /// Note a validator's statement about a particular candidate. + /// Agreements are simply tallied until a quorum is reached. 
+ Statement(Hash, SignedFullStatementWithPVD), } impl BoundToRelayParent for CandidateBackingMessage { fn relay_parent(&self) -> Hash { match self { Self::GetBackedCandidates(hash, _, _) => *hash, - Self::Second(hash, _, _) => *hash, + Self::Second(hash, _, _, _) => *hash, Self::Statement(hash, _) => *hash, } } diff --git a/primitives/src/v2/signed.rs b/primitives/src/v2/signed.rs index 28c3b790039f..bebc2c0208c9 100644 --- a/primitives/src/v2/signed.rs +++ b/primitives/src/v2/signed.rs @@ -157,7 +157,6 @@ impl, RealPayload: Encode> Signed Result, (Self, SuperPayload)> where SuperPayload: EncodeAs, - Payload: Encode, { if claimed.encode_as() == self.0.payload.encode_as() { Ok(Signed(UncheckedSigned { @@ -170,6 +169,34 @@ impl, RealPayload: Encode> Signed( + self, + convert: F, + ) -> Result, SuperPayload> + where + F: FnOnce(Payload) -> SuperPayload, + SuperPayload: EncodeAs, + { + let expected_encode_as = self.0.payload.encode_as(); + let converted = convert(self.0.payload); + if converted.encode_as() == expected_encode_as { + Ok(Signed(UncheckedSigned { + payload: converted, + validator_index: self.0.validator_index, + signature: self.0.signature, + real_payload: sp_std::marker::PhantomData, + })) + } else { + Err(converted) + } + } } // We can't bound this on `Payload: Into` because that conversion consumes diff --git a/statement-table/src/generic.rs b/statement-table/src/generic.rs index 8d7838dd32d9..eb5def0cef44 100644 --- a/statement-table/src/generic.rs +++ b/statement-table/src/generic.rs @@ -425,7 +425,9 @@ impl Table { } false - } else if self.config.allow_multiple_seconded && existing.proposals.iter().find(|(ref od, _)| od == &digest).is_some() { + } else if self.config.allow_multiple_seconded && + existing.proposals.iter().find(|(ref od, _)| od == &digest).is_some() + { false } else { existing.proposals.push((digest.clone(), signature.clone())); @@ -585,15 +587,11 @@ mod tests { use std::collections::HashMap; fn create_single_seconded() -> Table { - 
Table::new(Config { - allow_multiple_seconded: false, - }) + Table::new(Config { allow_multiple_seconded: false }) } fn create_many_seconded() -> Table { - Table::new(Config { - allow_multiple_seconded: true, - }) + Table::new(Config { allow_multiple_seconded: true }) } #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] From eb0ee293470cea9e498d33b286b5843d5ba0676a Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 29 May 2022 21:53:50 -0500 Subject: [PATCH 31/54] clean up all warnings --- node/core/backing/src/lib.rs | 20 +++----------------- 1 file changed, 3 insertions(+), 17 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index a66a4b40e646..23f9881186eb 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -79,12 +79,10 @@ use futures::{ use error::{Error, FatalResult}; use polkadot_node_primitives::{ - AvailableData, InvalidCandidate, PoV, SignedDisputeStatement, SignedFullStatement, - SignedFullStatementWithPVD, Statement, StatementWithPVD, ValidationResult, - BACKING_EXECUTION_TIMEOUT, + AvailableData, InvalidCandidate, PoV, SignedDisputeStatement, SignedFullStatementWithPVD, + StatementWithPVD, ValidationResult, BACKING_EXECUTION_TIMEOUT, }; use polkadot_node_subsystem::{ - jaeger, messages::{ AvailabilityDistributionMessage, AvailabilityStoreMessage, CandidateBackingMessage, CandidateValidationMessage, CollatorProtocolMessage, DisputeCoordinatorMessage, @@ -237,10 +235,6 @@ enum ProspectiveParachainsMode { } impl ProspectiveParachainsMode { - fn is_disabled(&self) -> bool { - self == &ProspectiveParachainsMode::Disabled - } - fn is_enabled(&self) -> bool { self == &ProspectiveParachainsMode::Enabled } @@ -361,7 +355,6 @@ async fn run_iteration( &mut *ctx, update, state, - &metrics, ).await?; } FromOverseer::Signal(OverseerSignal::BlockFinalized(..)) => {} @@ -603,7 +596,6 @@ async fn request_candidate_validation( struct BackgroundValidationOutputs { candidate: CandidateReceipt, 
commitments: CandidateCommitments, - pov: Arc, persisted_validation_data: PersistedValidationData, } @@ -709,7 +701,6 @@ async fn validate_and_make_available( Ok(()) => Ok(BackgroundValidationOutputs { candidate, commitments, - pov: pov.clone(), persisted_validation_data: validation_data, }), Err(InvalidErasureRoot) => { @@ -785,7 +776,6 @@ async fn handle_active_leaves_update( ctx: &mut Context, update: ActiveLeavesUpdate, state: &mut State, - metrics: &Metrics, ) -> Result<(), Error> { enum LeafHasProspectiveParachains { Enabled(Result, ImplicitViewFetchError>), @@ -1193,7 +1183,6 @@ async fn handle_validated_candidate_command( candidate, commitments, persisted_validation_data, - .. } = outputs; if rp_state.issued_statements.contains(&candidate_hash) { @@ -1694,8 +1683,6 @@ async fn kick_off_validation_work( return Ok(()) } - let descriptor = attesting.candidate.descriptor().clone(); - gum::debug!( target: LOG_TARGET, candidate_hash = ?candidate_hash, @@ -1734,7 +1721,6 @@ async fn maybe_validate_and_import( state: &mut State, relay_parent: Hash, statement: SignedFullStatementWithPVD, - metrics: &Metrics, ) -> Result<(), Error> { let rp_state = match state.per_relay_parent.get_mut(&relay_parent) { Some(r) => r, @@ -1953,7 +1939,7 @@ async fn handle_statement_message( ) -> Result<(), Error> { let _timer = metrics.time_process_statement(); - match maybe_validate_and_import(ctx, state, relay_parent, statement, metrics).await { + match maybe_validate_and_import(ctx, state, relay_parent, statement).await { Err(Error::ValidationFailed(_)) => Ok(()), Err(e) => Err(e), Ok(()) => Ok(()), From d80c190722af8a3f18e65e99f0928dd76d62807d Mon Sep 17 00:00:00 2001 From: Chris Sosnin Date: Tue, 31 May 2022 21:57:13 +0300 Subject: [PATCH 32/54] Add tests for implicit view --- .../src/backing_implicit_view.rs | 320 +++++++++++++++++- 1 file changed, 308 insertions(+), 12 deletions(-) diff --git a/node/subsystem-util/src/backing_implicit_view.rs 
b/node/subsystem-util/src/backing_implicit_view.rs index 10dd0a1290d5..0f36d8030428 100644 --- a/node/subsystem-util/src/backing_implicit_view.rs +++ b/node/subsystem-util/src/backing_implicit_view.rs @@ -37,14 +37,14 @@ pub struct View { } // Minimum relay parents implicitly relative to a particular block. -#[derive(Clone)] +#[derive(Debug, Clone)] struct AllowedRelayParents { // minimum relay parents can only be fetched for active leaves, // so this will be empty for all blocks that haven't ever been // witnessed as active leaves. minimum_relay_parents: HashMap, // Ancestry, in descending order, starting from the block hash itself down - // to and including the minimum of `minimum_relay_parentes`. + // to and including the minimum of `minimum_relay_parents`. allowed_relay_parents_contiguous: Vec, } @@ -76,14 +76,14 @@ impl AllowedRelayParents { } } -#[derive(Clone)] +#[derive(Debug, Clone)] struct ActiveLeafPruningInfo { - // The mimimum block in the same branch of the relay-chain that should be + // The minimum block in the same branch of the relay-chain that should be // preserved. retain_minimum: BlockNumber, } -#[derive(Clone)] +#[derive(Debug, Clone)] struct BlockInfo { block_number: BlockNumber, // If this was previously an active leaf, this will be `Some` @@ -135,7 +135,7 @@ impl View { match res { Ok(fetched) => { - let retain_minimum = std::cmp::max( + let retain_minimum = std::cmp::min( fetched.minimum_ancestor_number, fetched.leaf_number.saturating_sub(MINIMUM_RETAIN_LENGTH), ); @@ -163,7 +163,7 @@ impl View { let minimum = self.leaves.values().map(|l| l.retain_minimum).min(); self.block_info_storage - .retain(|_, i| minimum.map_or(false, |m| i.block_number < m)); + .retain(|_, i| minimum.map_or(false, |m| i.block_number >= m)); } } @@ -207,7 +207,7 @@ impl View { pub enum FetchError { /// Leaf was already known. AlreadyKnown, - /// The prospective parachains subsystem was uavailable. + /// The prospective parachains subsystem was unavailable. 
ProspectiveParachainsUnavailable, /// A block header was unavailable. BlockHeaderUnavailable(Hash, BlockHeaderUnavailableReason), @@ -371,12 +371,308 @@ where #[cfg(test)] mod tests { use super::*; + use crate::TimeoutExt; + use assert_matches::assert_matches; + use futures::future::{join, FutureExt}; + use polkadot_node_subsystem::AllMessages; + use polkadot_node_subsystem_test_helpers::{ + make_subsystem_context, TestSubsystemContextHandle, + }; + use polkadot_overseer::SubsystemContext; + use polkadot_primitives::v2::Header; + use sp_core::testing::TaskExecutor; + use std::time::Duration; + + const PARA_A: ParaId = ParaId::new(0); + const PARA_B: ParaId = ParaId::new(1); + const PARA_C: ParaId = ParaId::new(2); + + const GENESIS_HASH: Hash = Hash::repeat_byte(0xFF); + const GENESIS_NUMBER: BlockNumber = 0; + + // Chains A and B are forks of genesis. + + const CHAIN_A: &[Hash] = + &[Hash::repeat_byte(0x01), Hash::repeat_byte(0x02), Hash::repeat_byte(0x03)]; + + const CHAIN_B: &[Hash] = &[ + Hash::repeat_byte(0x04), + Hash::repeat_byte(0x05), + Hash::repeat_byte(0x06), + Hash::repeat_byte(0x07), + Hash::repeat_byte(0x08), + Hash::repeat_byte(0x09), + ]; + + type VirtualOverseer = TestSubsystemContextHandle; + + const TIMEOUT: Duration = Duration::from_secs(2); + + async fn overseer_recv(virtual_overseer: &mut VirtualOverseer) -> AllMessages { + virtual_overseer + .recv() + .timeout(TIMEOUT) + .await + .expect("overseer `recv` timed out") + } - // TODO [now]: test update into fresh view, and that it constructs `AllowedRelayParents` correctly + fn default_header() -> Header { + Header { + parent_hash: Hash::zero(), + number: 0, + state_root: Hash::zero(), + extrinsics_root: Hash::zero(), + digest: Default::default(), + } + } - // TODO [now]: test update that reuses some existing block info + fn get_block_header(chain: &[Hash], hash: &Hash) -> Option
{ + let idx = chain.iter().position(|h| h == hash)?; + let parent_hash = idx.checked_sub(1).map(|i| chain[i]).unwrap_or(GENESIS_HASH); + let number = + if *hash == GENESIS_HASH { GENESIS_NUMBER } else { GENESIS_NUMBER + idx as u32 + 1 }; + Some(Header { parent_hash, number, ..default_header() }) + } - // TODO [now]: test pruning + async fn assert_block_header_requests( + virtual_overseer: &mut VirtualOverseer, + chain: &[Hash], + blocks: &[Hash], + ) { + for block in blocks.iter().rev() { + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::ChainApi( + ChainApiMessage::BlockHeader(hash, tx) + ) => { + assert_eq!(*block, hash, "unexpected block header request"); + let header = if block == &GENESIS_HASH { + Header { + number: GENESIS_NUMBER, + ..default_header() + } + } else { + get_block_header(chain, block).expect("unknown block") + }; + + tx.send(Ok(Some(header))).unwrap(); + } + ); + } + } + + async fn assert_min_relay_parents_request( + virtual_overseer: &mut VirtualOverseer, + leaf: &Hash, + response: Vec<(ParaId, u32)>, + ) { + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::GetMinimumRelayParents( + leaf_hash, + tx + ) + ) => { + assert_eq!(*leaf, leaf_hash, "received unexpected leaf hash"); + tx.send(response).unwrap(); + } + ); + } - // TODO [now]: test that former leaves still have `AllowedRelayParents` + #[test] + fn construct_fresh_view() { + let pool = TaskExecutor::new(); + let (mut ctx, mut ctx_handle) = make_subsystem_context::(pool); + + let mut view = View::default(); + + // Chain B. 
+ const PARA_A_MIN_PARENT: u32 = 4; + const PARA_B_MIN_PARENT: u32 = 3; + + let prospective_response = vec![(PARA_A, PARA_A_MIN_PARENT), (PARA_B, PARA_B_MIN_PARENT)]; + + let leaf = CHAIN_B.last().unwrap(); + let min_min_idx = (PARA_B_MIN_PARENT - GENESIS_NUMBER - 1) as usize; + + let fut = view.activate_leaf(ctx.sender(), *leaf).timeout(TIMEOUT).map(|res| { + let paras = res.expect("`activate_leaf` timed out").unwrap(); + assert_eq!(paras, vec![PARA_A, PARA_B]); + }); + let overseer_fut = async { + assert_min_relay_parents_request(&mut ctx_handle, leaf, prospective_response).await; + assert_block_header_requests(&mut ctx_handle, CHAIN_B, &CHAIN_B[min_min_idx..]).await; + }; + futures::executor::block_on(join(fut, overseer_fut)); + + for i in min_min_idx..(CHAIN_B.len() - 1) { + // No allowed relay parents constructed for ancestry. + assert!(view.known_allowed_relay_parents_under(&CHAIN_B[i], None).is_none()); + } + + let leaf_info = + view.block_info_storage.get(leaf).expect("block must be present in storage"); + assert_matches!( + leaf_info.maybe_allowed_relay_parents, + Some(ref allowed_relay_parents) => { + assert_eq!(allowed_relay_parents.minimum_relay_parents[&PARA_A], PARA_A_MIN_PARENT); + assert_eq!(allowed_relay_parents.minimum_relay_parents[&PARA_B], PARA_B_MIN_PARENT); + let expected_ancestry: Vec = + CHAIN_B[min_min_idx..].iter().rev().copied().collect(); + assert_eq!( + allowed_relay_parents.allowed_relay_parents_contiguous, + expected_ancestry + ); + } + ); + + // Suppose the whole test chain A is allowed up to genesis for para C. 
+ const PARA_C_MIN_PARENT: u32 = 0; + let prospective_response = vec![(PARA_C, PARA_C_MIN_PARENT)]; + let leaf = CHAIN_A.last().unwrap(); + let blocks = [&[GENESIS_HASH], CHAIN_A].concat(); + + let fut = view.activate_leaf(ctx.sender(), *leaf).timeout(TIMEOUT).map(|res| { + let paras = res.expect("`activate_leaf` timed out").unwrap(); + assert_eq!(paras, vec![PARA_C]); + }); + let overseer_fut = async { + assert_min_relay_parents_request(&mut ctx_handle, leaf, prospective_response).await; + assert_block_header_requests(&mut ctx_handle, CHAIN_A, &blocks).await; + }; + futures::executor::block_on(join(fut, overseer_fut)); + + assert_eq!(view.leaves.len(), 2); + } + + #[test] + fn reuse_block_info_storage() { + let pool = TaskExecutor::new(); + let (mut ctx, mut ctx_handle) = make_subsystem_context::(pool); + + let mut view = View::default(); + + const PARA_A_MIN_PARENT: u32 = 1; + let leaf_a_number = 3; + let leaf_a = CHAIN_B[leaf_a_number - 1]; + let min_min_idx = (PARA_A_MIN_PARENT - GENESIS_NUMBER - 1) as usize; + + let prospective_response = vec![(PARA_A, PARA_A_MIN_PARENT)]; + + let fut = view.activate_leaf(ctx.sender(), leaf_a).timeout(TIMEOUT).map(|res| { + let paras = res.expect("`activate_leaf` timed out").unwrap(); + assert_eq!(paras, vec![PARA_A]); + }); + let overseer_fut = async { + assert_min_relay_parents_request(&mut ctx_handle, &leaf_a, prospective_response).await; + assert_block_header_requests( + &mut ctx_handle, + CHAIN_B, + &CHAIN_B[min_min_idx..leaf_a_number], + ) + .await; + }; + futures::executor::block_on(join(fut, overseer_fut)); + + // Blocks up to the 3rd are present in storage. 
+ const PARA_B_MIN_PARENT: u32 = 2; + let leaf_b_number = 5; + let leaf_b = CHAIN_B[leaf_b_number - 1]; + + let prospective_response = vec![(PARA_B, PARA_B_MIN_PARENT)]; + + let fut = view.activate_leaf(ctx.sender(), leaf_b).timeout(TIMEOUT).map(|res| { + let paras = res.expect("`activate_leaf` timed out").unwrap(); + assert_eq!(paras, vec![PARA_B]); + }); + let overseer_fut = async { + assert_min_relay_parents_request(&mut ctx_handle, &leaf_b, prospective_response).await; + assert_block_header_requests( + &mut ctx_handle, + CHAIN_B, + &CHAIN_B[leaf_a_number..leaf_b_number], // Note the expected range. + ) + .await; + }; + futures::executor::block_on(join(fut, overseer_fut)); + + // Allowed relay parents for leaf A are preserved. + let leaf_a_info = + view.block_info_storage.get(&leaf_a).expect("block must be present in storage"); + assert_matches!( + leaf_a_info.maybe_allowed_relay_parents, + Some(ref allowed_relay_parents) => { + assert_eq!(allowed_relay_parents.minimum_relay_parents[&PARA_A], PARA_A_MIN_PARENT); + let expected_ancestry: Vec = + CHAIN_B[min_min_idx..leaf_a_number].iter().rev().copied().collect(); + let ancestry = view.known_allowed_relay_parents_under(&leaf_a, Some(PARA_A)).unwrap().to_vec(); + assert_eq!(ancestry, expected_ancestry); + } + ); + } + + #[test] + fn pruning() { + let pool = TaskExecutor::new(); + let (mut ctx, mut ctx_handle) = make_subsystem_context::(pool); + + let mut view = View::default(); + + const PARA_A_MIN_PARENT: u32 = 3; + let leaf_a = CHAIN_B.iter().rev().nth(1).unwrap(); + let leaf_a_idx = CHAIN_B.len() - 2; + let min_a_idx = (PARA_A_MIN_PARENT - GENESIS_NUMBER - 1) as usize; + + let prospective_response = vec![(PARA_A, PARA_A_MIN_PARENT)]; + + let fut = view + .activate_leaf(ctx.sender(), *leaf_a) + .timeout(TIMEOUT) + .map(|res| res.unwrap().unwrap()); + let overseer_fut = async { + assert_min_relay_parents_request(&mut ctx_handle, &leaf_a, prospective_response).await; + assert_block_header_requests( + &mut 
ctx_handle, + CHAIN_B, + &CHAIN_B[min_a_idx..=leaf_a_idx], + ) + .await; + }; + futures::executor::block_on(join(fut, overseer_fut)); + + // Also activate a leaf with a lesser minimum relay parent. + const PARA_B_MIN_PARENT: u32 = 2; + let leaf_b = CHAIN_B.last().unwrap(); + let min_b_idx = (PARA_B_MIN_PARENT - GENESIS_NUMBER - 1) as usize; + + let prospective_response = vec![(PARA_B, PARA_B_MIN_PARENT)]; + // Headers will be requested for the minimum block and the leaf. + let blocks = &[CHAIN_B[min_b_idx], *leaf_b]; + + let fut = view + .activate_leaf(ctx.sender(), *leaf_b) + .timeout(TIMEOUT) + .map(|res| res.expect("`activate_leaf` timed out").unwrap()); + let overseer_fut = async { + assert_min_relay_parents_request(&mut ctx_handle, &leaf_b, prospective_response).await; + assert_block_header_requests(&mut ctx_handle, CHAIN_B, blocks).await; + }; + futures::executor::block_on(join(fut, overseer_fut)); + + // Prune implicit ancestor (no-op). + let block_info_len = view.block_info_storage.len(); + view.deactivate_leaf(CHAIN_B[leaf_a_idx - 1]); + assert_eq!(block_info_len, view.block_info_storage.len()); + + // Prune a leaf with a greater minimum relay parent. + view.deactivate_leaf(*leaf_b); + for hash in CHAIN_B.iter().take(PARA_B_MIN_PARENT as usize) { + assert!(!view.block_info_storage.contains_key(hash)); + } + + // Prune the last leaf. 
+ view.deactivate_leaf(*leaf_a); + assert!(view.block_info_storage.is_empty()); + } } From f9cb5a859e198908afd58a100ab8dc91b16b5b25 Mon Sep 17 00:00:00 2001 From: Chris Sosnin Date: Thu, 2 Jun 2022 13:22:50 +0300 Subject: [PATCH 33/54] Improve doc comments --- node/subsystem-util/src/backing_implicit_view.rs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/node/subsystem-util/src/backing_implicit_view.rs b/node/subsystem-util/src/backing_implicit_view.rs index 0f36d8030428..dc10efe519fe 100644 --- a/node/subsystem-util/src/backing_implicit_view.rs +++ b/node/subsystem-util/src/backing_implicit_view.rs @@ -135,6 +135,9 @@ impl View { match res { Ok(fetched) => { + // Retain at least `MINIMUM_RETAIN_LENGTH` blocks in storage. + // This helps to avoid Chain API calls when activating leaves in the + // same chain. let retain_minimum = std::cmp::min( fetched.minimum_ancestor_number, fetched.leaf_number.saturating_sub(MINIMUM_RETAIN_LENGTH), @@ -167,7 +170,13 @@ impl View { } } - /// Get an iterator over all allowed relay-parents in the view. + /// Get an iterator over all allowed relay-parents in the view with no particular order. + /// + /// **Important**: not all blocks are guaranteed to be allowed for some leaves, it may + /// happen that a block info is only kept in the view storage because of a retaining rule. + /// + /// For getting relay-parents that are valid for parachain candidates use + /// [`View::known_allowed_relay_parents_under`]. 
pub fn all_allowed_relay_parents<'a>(&'a self) -> impl Iterator + 'a { self.block_info_storage.keys() } From 5104079bdf7227517877fe55343d0d080120d9ed Mon Sep 17 00:00:00 2001 From: Chris Sosnin Date: Thu, 2 Jun 2022 15:45:12 +0300 Subject: [PATCH 34/54] Prospective parachains mode based on Runtime API version --- node/core/backing/src/error.rs | 3 +++ node/core/backing/src/lib.rs | 32 +++++++++++++++++++++++++------- 2 files changed, 28 insertions(+), 7 deletions(-) diff --git a/node/core/backing/src/error.rs b/node/core/backing/src/error.rs index 2e10e75dad4e..13d33d852f60 100644 --- a/node/core/backing/src/error.rs +++ b/node/core/backing/src/error.rs @@ -45,6 +45,9 @@ pub enum Error { #[error("Fetching validation code by hash failed {0:?}, {1:?}")] FetchValidationCode(ValidationCodeHash, RuntimeApiError), + #[error("Fetching Runtime API version failed {0:?}")] + FetchRuntimeApiVersion(RuntimeApiError), + #[error("No validation code {0:?}")] NoValidationCode(ValidationCodeHash), diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 23f9881186eb..a22e95f7be8d 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -762,13 +762,31 @@ async fn handle_communication( Ok(()) } +#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] async fn prospective_parachains_mode( - _ctx: &mut Context, - _leaf_hash: Hash, -) -> ProspectiveParachainsMode { - // TODO [now]: this should be a runtime API version call - // cc https://github.com/paritytech/substrate/discussions/11338 - ProspectiveParachainsMode::Disabled + ctx: &mut Context, + leaf_hash: Hash, +) -> Result { + let (tx, rx) = oneshot::channel(); + ctx.send_message(RuntimeApiMessage::Request(leaf_hash, RuntimeApiRequest::Version(tx))) + .await; + + let response = rx.await.map_err(Error::RuntimeApiUnavailable)?; + + let version = response.map_err(Error::FetchRuntimeApiVersion)?; + + if version == 3 { + Ok(ProspectiveParachainsMode::Enabled) + 
} else { + if version != 2 { + gum::warn!( + target: LOG_TARGET, + "Runtime API version is {}, expected 2 or 3. Prospective parachains are disabled", + version + ); + } + Ok(ProspectiveParachainsMode::Disabled) + } } #[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] @@ -787,7 +805,7 @@ async fn handle_active_leaves_update( let res = if let Some(leaf) = update.activated { // Only activate in implicit view if prospective // parachains are enabled. - let mode = prospective_parachains_mode(ctx, leaf.hash).await; + let mode = prospective_parachains_mode(ctx, leaf.hash).await?; let leaf_hash = leaf.hash; Some(( From b89b60528fe8a73a5400bc2f8af8566d580d4f35 Mon Sep 17 00:00:00 2001 From: Chris Sosnin Date: Thu, 2 Jun 2022 20:13:13 +0300 Subject: [PATCH 35/54] Add a TODO --- node/core/backing/src/lib.rs | 10 +++++++--- node/core/backing/src/tests.rs | 1 + 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index a22e95f7be8d..164666110854 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -767,13 +767,17 @@ async fn prospective_parachains_mode( ctx: &mut Context, leaf_hash: Hash, ) -> Result { + // TODO: call a Runtime API once staging version is available + // https://github.com/paritytech/substrate/discussions/11338 + let (tx, rx) = oneshot::channel(); ctx.send_message(RuntimeApiMessage::Request(leaf_hash, RuntimeApiRequest::Version(tx))) .await; - let response = rx.await.map_err(Error::RuntimeApiUnavailable)?; - - let version = response.map_err(Error::FetchRuntimeApiVersion)?; + let version = rx + .await + .map_err(Error::RuntimeApiUnavailable)? 
+ .map_err(Error::FetchRuntimeApiVersion)?; if version == 3 { Ok(ProspectiveParachainsMode::Enabled) diff --git a/node/core/backing/src/tests.rs b/node/core/backing/src/tests.rs index e7fbdf6a4e13..97f12d3a7ef4 100644 --- a/node/core/backing/src/tests.rs +++ b/node/core/backing/src/tests.rs @@ -23,6 +23,7 @@ use assert_matches::assert_matches; use futures::{future, Future}; use polkadot_node_primitives::{BlockData, InvalidCandidate}; use polkadot_node_subsystem::{ + jaeger, messages::{ AllMessages, CollatorProtocolMessage, RuntimeApiMessage, RuntimeApiRequest, ValidationFailed, From 05b98188bb0c15fa6a0ba7488642aa27b765b358 Mon Sep 17 00:00:00 2001 From: Chris Sosnin Date: Thu, 2 Jun 2022 23:23:53 +0300 Subject: [PATCH 36/54] Rework seconding_sanity_check --- node/core/backing/src/lib.rs | 56 ++++++++++++++++++++++++++---------- 1 file changed, 41 insertions(+), 15 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 164666110854..67ee2204b622 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -73,6 +73,7 @@ use std::{ use bitvec::vec::BitVec; use futures::{ channel::{mpsc, oneshot}, + future::BoxFuture, stream::FuturesOrdered, FutureExt, SinkExt, StreamExt, TryFutureExt, }; @@ -1113,6 +1114,7 @@ enum SecondingAllowed { async fn seconding_sanity_check( ctx: &mut Context, active_leaves: &HashMap, + implicit_view: &ImplicitView, candidate_hash: CandidateHash, candidate_para: ParaId, parent_head_data_hash: Hash, @@ -1133,21 +1135,44 @@ async fn seconding_sanity_check( } let mut membership = Vec::new(); - let mut responses = FuturesOrdered::new(); + let mut responses = FuturesOrdered::>>::new(); + for (head, leaf_state) in active_leaves { - let (tx, rx) = oneshot::channel(); - ctx.send_message(ProspectiveParachainsMessage::GetHypotheticalDepth( - HypotheticalDepthRequest { - candidate_hash, - candidate_para, - parent_head_data_hash, - candidate_relay_parent, - fragment_tree_relay_parent: *head, - }, - 
tx, - )) - .await; - responses.push(rx.map_ok(move |depths| (depths, head, leaf_state))); + if leaf_state.prospective_parachains_mode.is_enabled() { + // Check that the candidate relay parent is allowed for para, skip the + // leaf otherwise. + let allowed_parents_for_para = + implicit_view.known_allowed_relay_parents_under(head, Some(candidate_para)); + if !allowed_parents_for_para.unwrap_or_default().contains(&candidate_relay_parent) { + continue + } + + let (tx, rx) = oneshot::channel(); + ctx.send_message(ProspectiveParachainsMessage::GetHypotheticalDepth( + HypotheticalDepthRequest { + candidate_hash, + candidate_para, + parent_head_data_hash, + candidate_relay_parent, + fragment_tree_relay_parent: *head, + }, + tx, + )) + .await; + responses.push(rx.map_ok(move |depths| (depths, head, leaf_state)).boxed()); + } else { + if head == &candidate_relay_parent { + if leaf_state.seconded_at_depth.contains_key(&0) { + // The leaf is already occupied. + return SecondingAllowed::No + } + responses.push(futures::future::ready(Ok((vec![0], head, leaf_state))).boxed()) + } + } + } + + if responses.is_empty() { + return SecondingAllowed::No } for response in responses.next().await { @@ -1217,11 +1242,12 @@ async fn handle_validated_candidate_command( let fragment_tree_membership = match seconding_sanity_check( ctx, &state.per_leaf, + &state.implicit_view, candidate_hash, candidate.descriptor().para_id, persisted_validation_data.parent_head.hash(), - candidate.descriptor().relay_parent, commitments.head_data.hash(), + candidate.descriptor().relay_parent, ) .await { From 4fbc65be8474b8a1b366f064334ccf55a9b63390 Mon Sep 17 00:00:00 2001 From: Chris Sosnin Date: Mon, 6 Jun 2022 11:28:41 +0300 Subject: [PATCH 37/54] Iterate over responses --- node/core/backing/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 67ee2204b622..a15d091eb497 100644 --- a/node/core/backing/src/lib.rs +++ 
b/node/core/backing/src/lib.rs @@ -1166,7 +1166,7 @@ async fn seconding_sanity_check( // The leaf is already occupied. return SecondingAllowed::No } - responses.push(futures::future::ready(Ok((vec![0], head, leaf_state))).boxed()) + responses.push(futures::future::ok((vec![0], head, leaf_state)).boxed()); } } } @@ -1175,7 +1175,7 @@ async fn seconding_sanity_check( return SecondingAllowed::No } - for response in responses.next().await { + while let Some(response) = responses.next().await { match response { Err(oneshot::Canceled) => { gum::warn!( From ddc0d4df33cd3cb2befe4fe11a109a8739a9128e Mon Sep 17 00:00:00 2001 From: Chris Sosnin Date: Mon, 6 Jun 2022 12:47:46 +0300 Subject: [PATCH 38/54] Update backing tests --- node/core/backing/src/tests.rs | 435 ++++++++++++++++++++++++++------- 1 file changed, 343 insertions(+), 92 deletions(-) diff --git a/node/core/backing/src/tests.rs b/node/core/backing/src/tests.rs index 97f12d3a7ef4..8cd505223b56 100644 --- a/node/core/backing/src/tests.rs +++ b/node/core/backing/src/tests.rs @@ -17,11 +17,11 @@ use super::*; use ::test_helpers::{ dummy_candidate_receipt_bad_sig, dummy_collator, dummy_collator_signature, - dummy_committed_candidate_receipt, dummy_hash, dummy_validation_code, + dummy_committed_candidate_receipt, dummy_hash, }; use assert_matches::assert_matches; use futures::{future, Future}; -use polkadot_node_primitives::{BlockData, InvalidCandidate}; +use polkadot_node_primitives::{BlockData, InvalidCandidate, SignedFullStatement, Statement}; use polkadot_node_subsystem::{ jaeger, messages::{ @@ -42,6 +42,10 @@ use sp_tracing as _; use statement_table::v2::Misbehavior; use std::collections::HashMap; +const API_VERSION_PROSPECTIVE_DISABLED: u32 = 2; + +const _API_VERSION_PROSPECTIVE_ENABLED: u32 = 3; + fn validator_pubkeys(val_ids: &[Sr25519Keyring]) -> Vec { val_ids.iter().map(|v| v.public().into()).collect() } @@ -54,6 +58,15 @@ fn table_statement_to_primitive(statement: TableStatement) -> Statement { } } +fn 
dummy_pvd() -> PersistedValidationData { + PersistedValidationData { + parent_head: HeadData(vec![7, 8, 9]), + relay_parent_number: 5, + max_pov_size: 1024, + relay_parent_storage_root: Default::default(), + } +} + struct TestState { chain_ids: Vec, keystore: SyncCryptoStorePtr, @@ -191,6 +204,8 @@ struct TestCandidateBuilder { pov_hash: Hash, relay_parent: Hash, erasure_root: Hash, + persisted_validation_data_hash: Hash, + validation_code: Vec, } impl TestCandidateBuilder { @@ -204,8 +219,8 @@ impl TestCandidateBuilder { collator: dummy_collator(), signature: dummy_collator_signature(), para_head: dummy_hash(), - validation_code_hash: dummy_validation_code().hash(), - persisted_validation_data_hash: dummy_hash(), + validation_code_hash: ValidationCode(self.validation_code).hash(), + persisted_validation_data_hash: self.persisted_validation_data_hash, }, commitments: CandidateCommitments { head_data: self.head_data, @@ -233,6 +248,17 @@ async fn test_startup(virtual_overseer: &mut VirtualOverseer, test_state: &TestS )))) .await; + // Prospective parachains mode is temporarily defined by the Runtime API version. + // Disable it for the test leaf. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::Version(tx)) + ) if parent == test_state.relay_parent => { + tx.send(Ok(API_VERSION_PROSPECTIVE_DISABLED)).unwrap(); + } + ); + // Check that subsystem job issues a request for a validator set. 
assert_matches!( virtual_overseer.recv().await, @@ -311,6 +337,8 @@ fn backing_second_works() { test_startup(&mut virtual_overseer, &test_state).await; let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); let expected_head_data = test_state.head_data.get(&test_state.chain_ids[0]).unwrap(); @@ -321,6 +349,8 @@ fn backing_second_works() { pov_hash, head_data: expected_head_data.clone(), erasure_root: make_erasure_root(&test_state, pov.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), ..Default::default() } .build(); @@ -328,31 +358,50 @@ fn backing_second_works() { let second = CandidateBackingMessage::Second( test_state.relay_parent, candidate.to_plain(), + pvd.clone(), pov.clone(), ); virtual_overseer.send(FromOverseer::Communication { msg: second }).await; + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx)) + ) if hash == validation_code.hash() => { + tx.send(Ok(Some(validation_code.clone()))).unwrap(); + } + ); + assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromChainState( + CandidateValidationMessage::ValidateFromExhaustive( + _pvd, + _validation_code, candidate_receipt, - pov, + _pov, timeout, tx, - ) - ) if pov == pov && &candidate_receipt.descriptor == candidate.descriptor() && timeout == BACKING_EXECUTION_TIMEOUT && candidate.commitments.hash() == candidate_receipt.commitments_hash => { - tx.send(Ok( - ValidationResult::Valid(CandidateCommitments { + ), + ) if _pvd == pvd && + _validation_code == validation_code && + *_pov == pov && &candidate_receipt.descriptor == candidate.descriptor() && + timeout == BACKING_EXECUTION_TIMEOUT && + candidate.commitments.hash() == candidate_receipt.commitments_hash => + { + 
tx.send(Ok(ValidationResult::Valid( + CandidateCommitments { head_data: expected_head_data.clone(), horizontal_messages: Vec::new(), upward_messages: Vec::new(), new_validation_code: None, processed_downward_messages: 0, hrmp_watermark: 0, - }, test_state.validation_data.clone()), - )).unwrap(); + }, + test_state.validation_data.clone(), + ))) + .unwrap(); } ); @@ -408,6 +457,8 @@ fn backing_works() { test_startup(&mut virtual_overseer, &test_state).await; let pov = PoV { block_data: BlockData(vec![1, 2, 3]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); let pov_hash = pov.hash(); @@ -419,6 +470,7 @@ fn backing_works() { pov_hash, head_data: expected_head_data.clone(), erasure_root: make_erasure_root(&test_state, pov.clone()), + validation_code: validation_code.0.clone(), ..Default::default() } .build(); @@ -441,9 +493,9 @@ fn backing_works() { .await .expect("Insert key into keystore"); - let signed_a = SignedFullStatement::sign( + let signed_a = SignedFullStatementWithPVD::sign( &test_state.keystore, - Statement::Seconded(candidate_a.clone()), + StatementWithPVD::Seconded(candidate_a.clone(), pvd.clone()), &test_state.signing_context, ValidatorIndex(2), &public2.into(), @@ -453,9 +505,9 @@ fn backing_works() { .flatten() .expect("should be signed"); - let signed_b = SignedFullStatement::sign( + let signed_b = SignedFullStatementWithPVD::sign( &test_state.keystore, - Statement::Valid(candidate_a_hash), + StatementWithPVD::Valid(candidate_a_hash), &test_state.signing_context, ValidatorIndex(5), &public1.into(), @@ -478,6 +530,15 @@ fn backing_works() { ) .await; + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx)) + ) if hash == validation_code.hash() => { + tx.send(Ok(Some(validation_code.clone()))).unwrap(); + } + ); + // Sending a `Statement::Seconded` for our assignment will start // validation process. 
The first thing requested is the PoV. assert_matches!( @@ -498,13 +559,20 @@ fn backing_works() { assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromChainState( - c, - pov, + CandidateValidationMessage::ValidateFromExhaustive( + _pvd, + _validation_code, + candidate_receipt, + _pov, timeout, tx, - ) - ) if pov == pov && c.descriptor() == candidate_a.descriptor() && timeout == BACKING_EXECUTION_TIMEOUT && c.commitments_hash == candidate_a_commitments_hash=> { + ), + ) if _pvd == pvd && + _validation_code == validation_code && + *_pov == pov && &candidate_receipt.descriptor == candidate_a.descriptor() && + timeout == BACKING_EXECUTION_TIMEOUT && + candidate_a_commitments_hash == candidate_receipt.commitments_hash => + { tx.send(Ok( ValidationResult::Valid(CandidateCommitments { head_data: expected_head_data.clone(), @@ -585,6 +653,8 @@ fn backing_works_while_validation_ongoing() { test_startup(&mut virtual_overseer, &test_state).await; let pov = PoV { block_data: BlockData(vec![1, 2, 3]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); let pov_hash = pov.hash(); @@ -596,6 +666,7 @@ fn backing_works_while_validation_ongoing() { pov_hash, head_data: expected_head_data.clone(), erasure_root: make_erasure_root(&test_state, pov.clone()), + validation_code: validation_code.0.clone(), ..Default::default() } .build(); @@ -625,9 +696,9 @@ fn backing_works_while_validation_ongoing() { .await .expect("Insert key into keystore"); - let signed_a = SignedFullStatement::sign( + let signed_a = SignedFullStatementWithPVD::sign( &test_state.keystore, - Statement::Seconded(candidate_a.clone()), + StatementWithPVD::Seconded(candidate_a.clone(), pvd.clone()), &test_state.signing_context, ValidatorIndex(2), &public2.into(), @@ -637,9 +708,9 @@ fn backing_works_while_validation_ongoing() { .flatten() .expect("should be signed"); - let signed_b = SignedFullStatement::sign( + let 
signed_b = SignedFullStatementWithPVD::sign( &test_state.keystore, - Statement::Valid(candidate_a_hash), + StatementWithPVD::Valid(candidate_a_hash), &test_state.signing_context, ValidatorIndex(5), &public1.into(), @@ -649,9 +720,9 @@ fn backing_works_while_validation_ongoing() { .flatten() .expect("should be signed"); - let signed_c = SignedFullStatement::sign( + let signed_c = SignedFullStatementWithPVD::sign( &test_state.keystore, - Statement::Valid(candidate_a_hash), + StatementWithPVD::Valid(candidate_a_hash), &test_state.signing_context, ValidatorIndex(3), &public3.into(), @@ -673,6 +744,15 @@ fn backing_works_while_validation_ongoing() { ) .await; + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx)) + ) if hash == validation_code.hash() => { + tx.send(Ok(Some(validation_code.clone()))).unwrap(); + } + ); + // Sending a `Statement::Seconded` for our assignment will start // validation process. The first thing requested is PoV from the // `PoVDistribution`. @@ -694,13 +774,20 @@ fn backing_works_while_validation_ongoing() { assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromChainState( - c, - pov, + CandidateValidationMessage::ValidateFromExhaustive( + _pvd, + _validation_code, + candidate_receipt, + _pov, timeout, tx, - ) - ) if pov == pov && c.descriptor() == candidate_a.descriptor() && timeout == BACKING_EXECUTION_TIMEOUT && candidate_a_commitments_hash == c.commitments_hash => { + ), + ) if _pvd == pvd && + _validation_code == validation_code && + *_pov == pov && &candidate_receipt.descriptor == candidate_a.descriptor() && + timeout == BACKING_EXECUTION_TIMEOUT && + candidate_a_commitments_hash == candidate_receipt.commitments_hash => + { // we never validate the candidate. our local node // shouldn't issue any statements. 
std::mem::forget(tx); @@ -794,6 +881,8 @@ fn backing_misbehavior_works() { let pov = PoV { block_data: BlockData(vec![1, 2, 3]) }; let pov_hash = pov.hash(); + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); let expected_head_data = test_state.head_data.get(&test_state.chain_ids[0]).unwrap(); @@ -803,6 +892,7 @@ fn backing_misbehavior_works() { pov_hash, erasure_root: make_erasure_root(&test_state, pov.clone()), head_data: expected_head_data.clone(), + validation_code: validation_code.0.clone(), ..Default::default() } .build(); @@ -817,9 +907,9 @@ fn backing_misbehavior_works() { ) .await .expect("Insert key into keystore"); - let seconded_2 = SignedFullStatement::sign( + let seconded_2 = SignedFullStatementWithPVD::sign( &test_state.keystore, - Statement::Seconded(candidate_a.clone()), + StatementWithPVD::Seconded(candidate_a.clone(), pvd.clone()), &test_state.signing_context, ValidatorIndex(2), &public2.into(), @@ -829,9 +919,9 @@ fn backing_misbehavior_works() { .flatten() .expect("should be signed"); - let valid_2 = SignedFullStatement::sign( + let valid_2 = SignedFullStatementWithPVD::sign( &test_state.keystore, - Statement::Valid(candidate_a_hash), + StatementWithPVD::Valid(candidate_a_hash), &test_state.signing_context, ValidatorIndex(2), &public2.into(), @@ -854,6 +944,15 @@ fn backing_misbehavior_works() { ) .await; + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx)) + ) if hash == validation_code.hash() => { + tx.send(Ok(Some(validation_code.clone()))).unwrap(); + } + ); + assert_matches!( virtual_overseer.recv().await, AllMessages::AvailabilityDistribution( @@ -870,13 +969,20 @@ fn backing_misbehavior_works() { assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromChainState( - c, - pov, + CandidateValidationMessage::ValidateFromExhaustive( + _pvd, 
+ _validation_code, + candidate_receipt, + _pov, timeout, tx, - ) - ) if pov == pov && c.descriptor() == candidate_a.descriptor() && timeout == BACKING_EXECUTION_TIMEOUT && candidate_a_commitments_hash == c.commitments_hash => { + ), + ) if _pvd == pvd && + _validation_code == validation_code && + *_pov == pov && &candidate_receipt.descriptor == candidate_a.descriptor() && + timeout == BACKING_EXECUTION_TIMEOUT && + candidate_a_commitments_hash == candidate_receipt.commitments_hash => + { tx.send(Ok( ValidationResult::Valid(CandidateCommitments { head_data: expected_head_data.clone(), @@ -992,8 +1098,17 @@ fn backing_dont_second_invalid() { test_startup(&mut virtual_overseer, &test_state).await; let pov_block_a = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd_a = dummy_pvd(); + let validation_code_a = ValidationCode(vec![1, 2, 3]); let pov_block_b = PoV { block_data: BlockData(vec![45, 46, 47]) }; + let pvd_b = { + let mut pvd_b = pvd_a.clone(); + pvd_b.parent_head = HeadData(vec![14, 15, 16]); + pvd_b.max_pov_size = pvd_a.max_pov_size / 2; + pvd_b + }; + let validation_code_b = ValidationCode(vec![4, 5, 6]); let pov_hash_a = pov_block_a.hash(); let pov_hash_b = pov_block_b.hash(); @@ -1005,6 +1120,8 @@ fn backing_dont_second_invalid() { relay_parent: test_state.relay_parent, pov_hash: pov_hash_a, erasure_root: make_erasure_root(&test_state, pov_block_a.clone()), + persisted_validation_data_hash: pvd_a.hash(), + validation_code: validation_code_a.0.clone(), ..Default::default() } .build(); @@ -1015,6 +1132,8 @@ fn backing_dont_second_invalid() { pov_hash: pov_hash_b, erasure_root: make_erasure_root(&test_state, pov_block_b.clone()), head_data: expected_head_data.clone(), + persisted_validation_data_hash: pvd_b.hash(), + validation_code: validation_code_b.0.clone(), ..Default::default() } .build(); @@ -1022,21 +1141,38 @@ fn backing_dont_second_invalid() { let second = CandidateBackingMessage::Second( test_state.relay_parent, candidate_a.to_plain(), + 
pvd_a.clone(), pov_block_a.clone(), ); virtual_overseer.send(FromOverseer::Communication { msg: second }).await; + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx)) + ) if hash == validation_code_a.hash() => { + tx.send(Ok(Some(validation_code_a.clone()))).unwrap(); + } + ); + assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromChainState( - c, - pov, + CandidateValidationMessage::ValidateFromExhaustive( + _pvd, + _validation_code, + candidate_receipt, + _pov, timeout, tx, - ) - ) if pov == pov && c.descriptor() == candidate_a.descriptor() && timeout == BACKING_EXECUTION_TIMEOUT => { + ), + ) if _pvd == pvd_a && + _validation_code == validation_code_a && + *_pov == pov_block_a && &candidate_receipt.descriptor == candidate_a.descriptor() && + timeout == BACKING_EXECUTION_TIMEOUT && + candidate_a.commitments.hash() == candidate_receipt.commitments_hash => + { tx.send(Ok(ValidationResult::Invalid(InvalidCandidate::BadReturn))).unwrap(); } ); @@ -1051,21 +1187,38 @@ fn backing_dont_second_invalid() { let second = CandidateBackingMessage::Second( test_state.relay_parent, candidate_b.to_plain(), + pvd_b.clone(), pov_block_b.clone(), ); virtual_overseer.send(FromOverseer::Communication { msg: second }).await; + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx)) + ) if hash == validation_code_b.hash() => { + tx.send(Ok(Some(validation_code_b.clone()))).unwrap(); + } + ); + assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromChainState( - c, - pov, + CandidateValidationMessage::ValidateFromExhaustive( + _pvd, + _validation_code, + candidate_receipt, + _pov, timeout, tx, - ) - ) if pov == pov && c.descriptor() == 
candidate_b.descriptor() && timeout == BACKING_EXECUTION_TIMEOUT => { + ), + ) if _pvd == pvd_b && + _validation_code == validation_code_b && + *_pov == pov_block_b && &candidate_receipt.descriptor == candidate_b.descriptor() && + timeout == BACKING_EXECUTION_TIMEOUT && + candidate_b.commitments.hash() == candidate_receipt.commitments_hash => + { tx.send(Ok( ValidationResult::Valid(CandidateCommitments { head_data: expected_head_data.clone(), @@ -1126,6 +1279,8 @@ fn backing_second_after_first_fails_works() { test_startup(&mut virtual_overseer, &test_state).await; let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); let pov_hash = pov.hash(); @@ -1134,6 +1289,8 @@ fn backing_second_after_first_fails_works() { relay_parent: test_state.relay_parent, pov_hash, erasure_root: make_erasure_root(&test_state, pov.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), ..Default::default() } .build(); @@ -1146,9 +1303,9 @@ fn backing_second_after_first_fails_works() { .await .expect("Insert key into keystore"); - let signed_a = SignedFullStatement::sign( + let signed_a = SignedFullStatementWithPVD::sign( &test_state.keystore, - Statement::Seconded(candidate.clone()), + StatementWithPVD::Seconded(candidate.clone(), pvd.clone()), &test_state.signing_context, ValidatorIndex(2), &validator2.into(), @@ -1172,6 +1329,15 @@ fn backing_second_after_first_fails_works() { ) .await; + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx)) + ) if hash == validation_code.hash() => { + tx.send(Ok(Some(validation_code.clone()))).unwrap(); + } + ); + // Subsystem requests PoV and requests validation. 
assert_matches!( virtual_overseer.recv().await, @@ -1190,13 +1356,20 @@ fn backing_second_after_first_fails_works() { assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromChainState( - c, - pov, + CandidateValidationMessage::ValidateFromExhaustive( + _pvd, + _validation_code, + candidate_receipt, + _pov, timeout, tx, - ) - ) if pov == pov && c.descriptor() == candidate.descriptor() && timeout == BACKING_EXECUTION_TIMEOUT && c.commitments_hash == candidate.commitments.hash() => { + ), + ) if _pvd == pvd && + _validation_code == validation_code && + *_pov == pov && &candidate_receipt.descriptor == candidate.descriptor() && + timeout == BACKING_EXECUTION_TIMEOUT && + candidate.commitments.hash() == candidate_receipt.commitments_hash => + { tx.send(Ok(ValidationResult::Invalid(InvalidCandidate::BadReturn))).unwrap(); } ); @@ -1206,12 +1379,15 @@ fn backing_second_after_first_fails_works() { let second = CandidateBackingMessage::Second( test_state.relay_parent, candidate.to_plain(), + pvd.clone(), pov.clone(), ); virtual_overseer.send(FromOverseer::Communication { msg: second }).await; let pov_to_second = PoV { block_data: BlockData(vec![3, 2, 1]) }; + let pvd_to_second = dummy_pvd(); + let validation_code_to_second = ValidationCode(vec![5, 6, 7]); let pov_hash = pov_to_second.hash(); @@ -1220,6 +1396,8 @@ fn backing_second_after_first_fails_works() { relay_parent: test_state.relay_parent, pov_hash, erasure_root: make_erasure_root(&test_state, pov_to_second.clone()), + persisted_validation_data_hash: pvd_to_second.hash(), + validation_code: validation_code_to_second.0.clone(), ..Default::default() } .build(); @@ -1227,6 +1405,7 @@ fn backing_second_after_first_fails_works() { let second = CandidateBackingMessage::Second( test_state.relay_parent, candidate_to_second.to_plain(), + pvd_to_second.clone(), pov_to_second.clone(), ); @@ -1235,15 +1414,19 @@ fn backing_second_after_first_fails_works() { // 
triggered on the prev step. virtual_overseer.send(FromOverseer::Communication { msg: second }).await; + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx)) + ) if hash == validation_code_to_second.hash() => { + tx.send(Ok(Some(validation_code_to_second.clone()))).unwrap(); + } + ); + assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromChainState( - _, - pov, - _, - _, - ) + CandidateValidationMessage::ValidateFromExhaustive(_, _, _, pov, ..), ) => { assert_eq!(&*pov, &pov_to_second); } @@ -1261,6 +1444,8 @@ fn backing_works_after_failed_validation() { test_startup(&mut virtual_overseer, &test_state).await; let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); let pov_hash = pov.hash(); @@ -1269,6 +1454,7 @@ fn backing_works_after_failed_validation() { relay_parent: test_state.relay_parent, pov_hash, erasure_root: make_erasure_root(&test_state, pov.clone()), + validation_code: validation_code.0.clone(), ..Default::default() } .build(); @@ -1280,9 +1466,9 @@ fn backing_works_after_failed_validation() { ) .await .expect("Insert key into keystore"); - let signed_a = SignedFullStatement::sign( + let signed_a = SignedFullStatementWithPVD::sign( &test_state.keystore, - Statement::Seconded(candidate.clone()), + StatementWithPVD::Seconded(candidate.clone(), pvd.clone()), &test_state.signing_context, ValidatorIndex(2), &public2.into(), @@ -1306,6 +1492,15 @@ fn backing_works_after_failed_validation() { ) .await; + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx)) + ) if hash == validation_code.hash() => { + tx.send(Ok(Some(validation_code.clone()))).unwrap(); + } + ); + // Subsystem requests PoV and 
requests validation. assert_matches!( virtual_overseer.recv().await, @@ -1324,13 +1519,20 @@ fn backing_works_after_failed_validation() { assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromChainState( - c, - pov, + CandidateValidationMessage::ValidateFromExhaustive( + _pvd, + _validation_code, + candidate_receipt, + _pov, timeout, tx, - ) - ) if pov == pov && c.descriptor() == candidate.descriptor() && timeout == BACKING_EXECUTION_TIMEOUT && c.commitments_hash == candidate.commitments.hash() => { + ), + ) if _pvd == pvd && + _validation_code == validation_code && + *_pov == pov && &candidate_receipt.descriptor == candidate.descriptor() && + timeout == BACKING_EXECUTION_TIMEOUT && + candidate.commitments.hash() == candidate_receipt.commitments_hash => + { tx.send(Err(ValidationFailed("Internal test error".into()))).unwrap(); } ); @@ -1353,6 +1555,7 @@ fn backing_works_after_failed_validation() { // Test that a `CandidateBackingMessage::Second` issues validation work // and in case validation is successful issues a `StatementDistributionMessage`. #[test] +#[ignore] // `required_collator` is disabled. 
fn backing_doesnt_second_wrong_collator() { let mut test_state = TestState::default(); test_state.availability_cores[0] = CoreState::Scheduled(ScheduledCore { @@ -1364,6 +1567,8 @@ fn backing_doesnt_second_wrong_collator() { test_startup(&mut virtual_overseer, &test_state).await; let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); let expected_head_data = test_state.head_data.get(&test_state.chain_ids[0]).unwrap(); @@ -1374,6 +1579,8 @@ fn backing_doesnt_second_wrong_collator() { pov_hash, head_data: expected_head_data.clone(), erasure_root: make_erasure_root(&test_state, pov.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), ..Default::default() } .build(); @@ -1381,6 +1588,7 @@ fn backing_doesnt_second_wrong_collator() { let second = CandidateBackingMessage::Second( test_state.relay_parent, candidate.to_plain(), + pvd.clone(), pov.clone(), ); @@ -1404,6 +1612,7 @@ fn backing_doesnt_second_wrong_collator() { } #[test] +#[ignore] // `required_collator` is disabled. 
fn validation_work_ignores_wrong_collator() { let mut test_state = TestState::default(); test_state.availability_cores[0] = CoreState::Scheduled(ScheduledCore { @@ -1415,6 +1624,8 @@ fn validation_work_ignores_wrong_collator() { test_startup(&mut virtual_overseer, &test_state).await; let pov = PoV { block_data: BlockData(vec![1, 2, 3]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); let pov_hash = pov.hash(); @@ -1426,6 +1637,8 @@ fn validation_work_ignores_wrong_collator() { pov_hash, head_data: expected_head_data.clone(), erasure_root: make_erasure_root(&test_state, pov.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), ..Default::default() } .build(); @@ -1437,9 +1650,9 @@ fn validation_work_ignores_wrong_collator() { ) .await .expect("Insert key into keystore"); - let seconding = SignedFullStatement::sign( + let seconding = SignedFullStatementWithPVD::sign( &test_state.keystore, - Statement::Seconded(candidate_a.clone()), + StatementWithPVD::Seconded(candidate_a.clone(), pvd.clone()), &test_state.signing_context, ValidatorIndex(2), &public2.into(), @@ -1544,6 +1757,8 @@ fn retry_works() { test_startup(&mut virtual_overseer, &test_state).await; let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); let pov_hash = pov.hash(); @@ -1552,6 +1767,8 @@ fn retry_works() { relay_parent: test_state.relay_parent, pov_hash, erasure_root: make_erasure_root(&test_state, pov.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), ..Default::default() } .build(); @@ -1577,9 +1794,9 @@ fn retry_works() { ) .await .expect("Insert key into keystore"); - let signed_a = SignedFullStatement::sign( + let signed_a = SignedFullStatementWithPVD::sign( &test_state.keystore, - Statement::Seconded(candidate.clone()), + StatementWithPVD::Seconded(candidate.clone(), pvd.clone()), 
&test_state.signing_context, ValidatorIndex(2), &public2.into(), @@ -1588,9 +1805,9 @@ fn retry_works() { .ok() .flatten() .expect("should be signed"); - let signed_b = SignedFullStatement::sign( + let signed_b = SignedFullStatementWithPVD::sign( &test_state.keystore, - Statement::Valid(candidate.hash()), + StatementWithPVD::Valid(candidate.hash()), &test_state.signing_context, ValidatorIndex(3), &public3.into(), @@ -1599,9 +1816,9 @@ fn retry_works() { .ok() .flatten() .expect("should be signed"); - let signed_c = SignedFullStatement::sign( + let signed_c = SignedFullStatementWithPVD::sign( &test_state.keystore, - Statement::Valid(candidate.hash()), + StatementWithPVD::Valid(candidate.hash()), &test_state.signing_context, ValidatorIndex(5), &public5.into(), @@ -1624,6 +1841,15 @@ fn retry_works() { ) .await; + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx)) + ) if hash == validation_code.hash() => { + tx.send(Ok(Some(validation_code.clone()))).unwrap(); + } + ); + // Subsystem requests PoV and requests validation. // We cancel - should mean retry on next backing statement. 
assert_matches!( @@ -1652,7 +1878,7 @@ fn retry_works() { .await; // Not deterministic which message comes first: - for _ in 0u32..2 { + for _ in 0u32..3 { match virtual_overseer.recv().await { AllMessages::Provisioner(ProvisionerMessage::ProvisionableData( _, @@ -1665,6 +1891,12 @@ fn retry_works() { ) if relay_parent == test_state.relay_parent => { std::mem::drop(tx); }, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::ValidationCodeByHash(hash, tx), + )) if hash == validation_code.hash() => { + tx.send(Ok(Some(validation_code.clone()))).unwrap(); + }, msg => { assert!(false, "Unexpected message: {:?}", msg); }, @@ -1683,6 +1915,15 @@ fn retry_works() { ) .await; + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx)) + ) if hash == validation_code.hash() => { + tx.send(Ok(Some(validation_code.clone()))).unwrap(); + } + ); + assert_matches!( virtual_overseer.recv().await, AllMessages::AvailabilityDistribution( @@ -1701,13 +1942,19 @@ fn retry_works() { assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromChainState( - c, - pov, + CandidateValidationMessage::ValidateFromExhaustive( + _pvd, + _validation_code, + candidate_receipt, + _pov, timeout, - _tx, - ) - ) if pov == pov && c.descriptor() == candidate.descriptor() && timeout == BACKING_EXECUTION_TIMEOUT && c.commitments_hash == candidate.commitments.hash() + .. 
+ ), + ) if _pvd == pvd && + _validation_code == validation_code && + *_pov == pov && &candidate_receipt.descriptor == candidate.descriptor() && + timeout == BACKING_EXECUTION_TIMEOUT && + candidate.commitments.hash() == candidate_receipt.commitments_hash ); virtual_overseer }); @@ -1721,6 +1968,8 @@ fn observes_backing_even_if_not_validator() { test_startup(&mut virtual_overseer, &test_state).await; let pov = PoV { block_data: BlockData(vec![1, 2, 3]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); let pov_hash = pov.hash(); @@ -1732,6 +1981,8 @@ fn observes_backing_even_if_not_validator() { pov_hash, head_data: expected_head_data.clone(), erasure_root: make_erasure_root(&test_state, pov.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), ..Default::default() } .build(); @@ -1761,9 +2012,9 @@ fn observes_backing_even_if_not_validator() { // Produce a 3-of-5 quorum on the candidate. - let signed_a = SignedFullStatement::sign( + let signed_a = SignedFullStatementWithPVD::sign( &test_state.keystore, - Statement::Seconded(candidate_a.clone()), + StatementWithPVD::Seconded(candidate_a.clone(), pvd.clone()), &test_state.signing_context, ValidatorIndex(0), &public0.into(), @@ -1773,9 +2024,9 @@ fn observes_backing_even_if_not_validator() { .flatten() .expect("should be signed"); - let signed_b = SignedFullStatement::sign( + let signed_b = SignedFullStatementWithPVD::sign( &test_state.keystore, - Statement::Valid(candidate_a_hash), + StatementWithPVD::Valid(candidate_a_hash), &test_state.signing_context, ValidatorIndex(5), &public1.into(), @@ -1785,9 +2036,9 @@ fn observes_backing_even_if_not_validator() { .flatten() .expect("should be signed"); - let signed_c = SignedFullStatement::sign( + let signed_c = SignedFullStatementWithPVD::sign( &test_state.keystore, - Statement::Valid(candidate_a_hash), + StatementWithPVD::Valid(candidate_a_hash), &test_state.signing_context, ValidatorIndex(2), 
&public2.into(), From d878c895acb76fc52bf3388ed01878b4ab09c3ad Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 31 May 2022 18:22:20 -0500 Subject: [PATCH 39/54] collator-protocol: load PVD from runtime --- .../src/validator_side/mod.rs | 60 ++++++++++++++++++- 1 file changed, 57 insertions(+), 3 deletions(-) diff --git a/node/network/collator-protocol/src/validator_side/mod.rs b/node/network/collator-protocol/src/validator_side/mod.rs index 3ebc253edb78..4d5f6b8b05db 100644 --- a/node/network/collator-protocol/src/validator_side/mod.rs +++ b/node/network/collator-protocol/src/validator_side/mod.rs @@ -53,7 +53,10 @@ use polkadot_node_subsystem::{ overseer, FromOverseer, OverseerSignal, PerLeafSpan, SubsystemSender, }; use polkadot_node_subsystem_util::metrics::{self, prometheus}; -use polkadot_primitives::v2::{CandidateReceipt, CollatorId, Hash, Id as ParaId}; +use polkadot_primitives::v2::{ + CandidateReceipt, CollatorId, Hash, Id as ParaId, OccupiedCoreAssumption, + PersistedValidationData, +}; use crate::error::Result; @@ -1307,6 +1310,39 @@ async fn dequeue_next_collation_and_fetch( } } +#[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)] +async fn request_persisted_validation_data( + ctx: &mut Context, + relay_parent: Hash, + para_id: ParaId, +) -> Option { + // TODO [https://github.com/paritytech/polkadot/issues/5054] + // + // As of https://github.com/paritytech/polkadot/pull/5557 the + // `Second` message requires the `PersistedValidationData` to be + // supplied. + // + // Without asynchronous backing, this can be easily fetched from the + // chain state. + // + // This assumes the core is _scheduled_, in keeping with the effective + // current behavior. If the core is occupied, we simply don't return + // anything. Likewise with runtime API errors, which are rare. 
+ let res = polkadot_node_subsystem_util::request_persisted_validation_data( + relay_parent, + para_id, + OccupiedCoreAssumption::Free, + ctx.sender(), + ) + .await + .await; + + match res { + Ok(Ok(Some(pvd))) => Some(pvd), + _ => None, + } +} + /// Handle a fetched collation result. #[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)] async fn handle_collation_fetched_result( @@ -1351,13 +1387,31 @@ async fn handle_collation_fetched_result( if let Entry::Vacant(entry) = state.pending_candidates.entry(relay_parent) { collation_event.1.commitments_hash = Some(candidate_receipt.commitments_hash); - ctx.sender() - .send_message(CandidateBackingMessage::Second( + + if let Some(pvd) = request_persisted_validation_data( + ctx, + candidate_receipt.descriptor().relay_parent, + candidate_receipt.descriptor().para_id, + ) + .await + { + // TODO [https://github.com/paritytech/polkadot/issues/5054] + // + // If PVD isn't available (core occupied) then we'll silently + // just not second this. But prior to asynchronous backing + // we wouldn't second anyway because the core is occupied. + // + // The proper refactoring would be to accept declares from collators + // but not even fetch from them if the core is occupied. Given 5054, + // there's no reason to do this right now. 
+ ctx.send_message(CandidateBackingMessage::Second( relay_parent.clone(), candidate_receipt, + pvd, pov, )) .await; + } entry.insert(collation_event); } else { From 39c194eb4ac1c983a71112f84a5118809ebbf991 Mon Sep 17 00:00:00 2001 From: Chris Sosnin Date: Mon, 6 Jun 2022 21:11:38 +0300 Subject: [PATCH 40/54] Fix validator side tests --- .../src/validator_side/tests.rs | 48 +++++++++++++++---- 1 file changed, 39 insertions(+), 9 deletions(-) diff --git a/node/network/collator-protocol/src/validator_side/tests.rs b/node/network/collator-protocol/src/validator_side/tests.rs index 6227b30551a4..4e69d66b73b1 100644 --- a/node/network/collator-protocol/src/validator_side/tests.rs +++ b/node/network/collator-protocol/src/validator_side/tests.rs @@ -32,8 +32,8 @@ use polkadot_node_subsystem::messages::{AllMessages, RuntimeApiMessage, RuntimeA use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_subsystem_util::TimeoutExt; use polkadot_primitives::v2::{ - CollatorPair, CoreState, GroupIndex, GroupRotationInfo, OccupiedCore, ScheduledCore, - ValidatorId, ValidatorIndex, + CollatorPair, CoreState, GroupIndex, GroupRotationInfo, HeadData, OccupiedCore, + PersistedValidationData, ScheduledCore, ValidatorId, ValidatorIndex, }; use polkadot_primitives_test_helpers::{ dummy_candidate_descriptor, dummy_candidate_receipt_bad_sig, dummy_hash, @@ -245,15 +245,45 @@ async fn assert_candidate_backing_second( expected_para_id: ParaId, expected_pov: &PoV, ) -> CandidateReceipt { + // TODO [https://github.com/paritytech/polkadot/issues/5054] + // + // While collator protocol isn't updated, it's expected to receive + // a Runtime API request for persisted validation data. 
+ let pvd = PersistedValidationData { + parent_head: HeadData(vec![7, 8, 9]), + relay_parent_number: 5, + max_pov_size: 1024, + relay_parent_storage_root: Default::default(), + }; + assert_matches!( overseer_recv(virtual_overseer).await, - AllMessages::CandidateBacking(CandidateBackingMessage::Second(relay_parent, candidate_receipt, incoming_pov) - ) => { - assert_eq!(expected_relay_parent, relay_parent); - assert_eq!(expected_para_id, candidate_receipt.descriptor.para_id); - assert_eq!(*expected_pov, incoming_pov); - candidate_receipt - }) + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + hash, + RuntimeApiRequest::PersistedValidationData(para_id, assumption, tx), + )) => { + assert_eq!(expected_relay_parent, hash); + assert_eq!(expected_para_id, para_id); + assert_eq!(OccupiedCoreAssumption::Free, assumption); + tx.send(Ok(Some(pvd.clone()))).unwrap(); + } + ); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::CandidateBacking(CandidateBackingMessage::Second( + relay_parent, + candidate_receipt, + received_pvd, + incoming_pov, + )) => { + assert_eq!(expected_relay_parent, relay_parent); + assert_eq!(expected_para_id, candidate_receipt.descriptor.para_id); + assert_eq!(*expected_pov, incoming_pov); + assert_eq!(pvd, received_pvd); + candidate_receipt + } + ) } /// Assert that a collator got disconnected. 
From ac7a67e9535c85c478fdc2dec77318ca40d25030 Mon Sep 17 00:00:00 2001 From: Chris Sosnin Date: Wed, 8 Jun 2022 00:55:26 +0300 Subject: [PATCH 41/54] Update statement-distribution to fetch PVD --- .../statement-distribution/src/error.rs | 12 ++- .../network/statement-distribution/src/lib.rs | 90 +++++++++++++++++-- 2 files changed, 91 insertions(+), 11 deletions(-) diff --git a/node/network/statement-distribution/src/error.rs b/node/network/statement-distribution/src/error.rs index 01b2efd53b86..f91b0980c966 100644 --- a/node/network/statement-distribution/src/error.rs +++ b/node/network/statement-distribution/src/error.rs @@ -18,9 +18,11 @@ //! Error handling related code and Error/Result definitions. use polkadot_node_network_protocol::PeerId; -use polkadot_node_subsystem::SubsystemError; +use polkadot_node_subsystem::{RuntimeApiError, SubsystemError}; use polkadot_node_subsystem_util::runtime; -use polkadot_primitives::v2::{CandidateHash, Hash}; +use polkadot_primitives::v2::{CandidateHash, Hash, Id as ParaId}; + +use futures::channel::oneshot; use crate::LOG_TARGET; @@ -56,6 +58,12 @@ pub enum Error { #[error("Error while accessing runtime information")] Runtime(#[from] runtime::Error), + #[error("RuntimeAPISubsystem channel closed before receipt")] + RuntimeApiUnavailable(#[source] oneshot::Canceled), + + #[error("Fetching persisted validation data for para {0:?}, {1:?}")] + FetchPersistedValidationData(ParaId, RuntimeApiError), + #[error("Relay parent could not be found in active heads")] NoSuchHead(Hash), diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs index 00759fd9e95d..e68766f1f933 100644 --- a/node/network/statement-distribution/src/lib.rs +++ b/node/network/statement-distribution/src/lib.rs @@ -33,7 +33,9 @@ use polkadot_node_network_protocol::{ v1::{self as protocol_v1, StatementMetadata}, IfDisconnected, PeerId, UnifiedReputationChange as Rep, Versioned, View, }; -use 
polkadot_node_primitives::{SignedFullStatement, Statement, UncheckedSignedFullStatement}; +use polkadot_node_primitives::{ + SignedFullStatement, Statement, StatementWithPVD, UncheckedSignedFullStatement, +}; use polkadot_node_subsystem_util::{self as util, rand, MIN_GOSSIP_PEERS}; use polkadot_node_subsystem::{ @@ -43,12 +45,12 @@ use polkadot_node_subsystem::{ StatementDistributionMessage, }, overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, PerLeafSpan, SpawnedSubsystem, - SubsystemError, + StatementDistributionSenderTrait, SubsystemError, }; use polkadot_primitives::v2::{ AuthorityDiscoveryId, CandidateHash, CommittedCandidateReceipt, CompactStatement, Hash, - SignedStatement, SigningContext, UncheckedSignedStatement, ValidatorId, ValidatorIndex, - ValidatorSignature, + Id as ParaId, OccupiedCoreAssumption, PersistedValidationData, SignedStatement, SigningContext, + UncheckedSignedStatement, ValidatorId, ValidatorIndex, ValidatorSignature, }; use futures::{ @@ -657,6 +659,8 @@ enum DeniedStatement { struct ActiveHeadData { /// All candidates we are aware of for this head, keyed by hash. candidates: HashSet, + /// Persisted validation data cache. + cached_validation_data: HashMap, /// Stored statements for circulation to peers. 
/// /// These are iterable in insertion order, and `Seconded` statements are always @@ -682,6 +686,7 @@ impl ActiveHeadData { ) -> Self { ActiveHeadData { candidates: Default::default(), + cached_validation_data: Default::default(), statements: Default::default(), waiting_large_statements: Default::default(), validators, @@ -691,6 +696,37 @@ impl ActiveHeadData { } } + async fn fetch_persisted_validation_data( + &mut self, + sender: &mut Sender, + relay_parent: Hash, + para_id: ParaId, + ) -> Result> + where + Sender: StatementDistributionSenderTrait, + { + if let Entry::Vacant(entry) = self.cached_validation_data.entry(para_id) { + let persisted_validation_data = + polkadot_node_subsystem_util::request_persisted_validation_data( + relay_parent, + para_id, + OccupiedCoreAssumption::Free, + sender, + ) + .await + .await + .map_err(Error::RuntimeApiUnavailable)? + .map_err(|err| Error::FetchPersistedValidationData(para_id, err))?; + + match persisted_validation_data { + Some(pvd) => entry.insert(pvd), + None => return Ok(None), + }; + } + + Ok(self.cached_validation_data.get(¶_id)) + } + /// Note the given statement. /// /// If it was not already known and can be accepted, returns `NotedStatement::Fresh`, @@ -1554,6 +1590,45 @@ async fn handle_incoming_message<'a, Context>( Ok(false) => {}, } + // TODO [https://github.com/paritytech/polkadot/issues/5055] + // + // For `Seconded` statements `None` or `Err` means we couldn't fetch the PVD, which + // means the statement shouldn't be accepted. + // + // In case of `Valid` we should have it cached prior, therefore this performs + // no Runtime API calls and always returns `Ok(Some(_))`. + if let Statement::Seconded(receipt) = statement.payload() { + let para_id = receipt.descriptor.para_id; + // Either call the Runtime API or check that validation data is cached. 
+ let result = active_head + .fetch_persisted_validation_data(ctx.sender(), relay_parent, para_id) + .await; + if !matches!(result, Ok(Some(_))) { + return None + } + } + + // Extend the payload with persisted validation data required by the backing + // subsystem. + // + // Do it in advance before noting the statement because we don't want to borrow active + // head mutable and use the cache. + let statement_with_pvd = statement + .clone() + .convert_to_superpayload_with(|statement| match statement { + Statement::Seconded(receipt) => { + let para_id = &receipt.descriptor.para_id; + let persisted_validation_data = active_head + .cached_validation_data + .get(para_id) + .cloned() + .expect("pvd is ensured to be cached above; qed"); + StatementWithPVD::Seconded(receipt, persisted_validation_data) + }, + Statement::Valid(candidate_hash) => StatementWithPVD::Valid(candidate_hash), + }) + .expect("payload was checked with conversion from compact; qed"); + // Note: `peer_data.receive` already ensures that the statement is not an unbounded equivocation // or unpinned to a seconded candidate. So it is safe to place it into the storage. match active_head.note_statement(statement) { @@ -1567,11 +1642,8 @@ async fn handle_incoming_message<'a, Context>( // When we receive a new message from a peer, we forward it to the // candidate backing subsystem. 
- ctx.send_message(CandidateBackingMessage::Statement( - relay_parent, - statement.statement.clone(), - )) - .await; + ctx.send_message(CandidateBackingMessage::Statement(relay_parent, statement_with_pvd)) + .await; Some((relay_parent, statement)) }, From e7bfac8e71025c2224bda03a2ed27466601600f4 Mon Sep 17 00:00:00 2001 From: Chris Sosnin Date: Thu, 9 Jun 2022 15:01:55 +0300 Subject: [PATCH 42/54] Fix statement-distribution tests --- .../statement-distribution/src/tests.rs | 91 +++++++++++++++++-- 1 file changed, 82 insertions(+), 9 deletions(-) diff --git a/node/network/statement-distribution/src/tests.rs b/node/network/statement-distribution/src/tests.rs index a7405d329971..aa48d8619390 100644 --- a/node/network/statement-distribution/src/tests.rs +++ b/node/network/statement-distribution/src/tests.rs @@ -26,14 +26,16 @@ use polkadot_node_network_protocol::{ }, view, ObservedRole, }; -use polkadot_node_primitives::{Statement, UncheckedSignedFullStatement}; +use polkadot_node_primitives::{ + SignedFullStatementWithPVD, Statement, UncheckedSignedFullStatement, +}; use polkadot_node_subsystem::{ jaeger, messages::{network_bridge_event, AllMessages, RuntimeApiMessage, RuntimeApiRequest}, ActivatedLeaf, LeafStatus, }; use polkadot_node_subsystem_test_helpers::mock::make_ferdie_keystore; -use polkadot_primitives::v2::{Hash, SessionInfo, ValidationCode}; +use polkadot_primitives::v2::{Hash, HeadData, SessionInfo, ValidationCode}; use polkadot_primitives_test_helpers::{ dummy_committed_candidate_receipt, dummy_hash, AlwaysZeroRng, }; @@ -44,6 +46,27 @@ use sp_keyring::Sr25519Keyring; use sp_keystore::{CryptoStore, SyncCryptoStore, SyncCryptoStorePtr}; use std::{iter::FromIterator as _, sync::Arc, time::Duration}; +fn dummy_pvd() -> PersistedValidationData { + PersistedValidationData { + parent_head: HeadData(vec![7, 8, 9]), + relay_parent_number: 5, + max_pov_size: 1024, + relay_parent_storage_root: Default::default(), + } +} + +fn extend_statement_with_pvd( + statement: 
SignedFullStatement, + pvd: PersistedValidationData, +) -> SignedFullStatementWithPVD { + statement + .convert_to_superpayload_with(|statement| match statement { + Statement::Seconded(receipt) => StatementWithPVD::Seconded(receipt, pvd), + Statement::Valid(candidate_hash) => StatementWithPVD::Valid(candidate_hash), + }) + .unwrap() +} + #[test] fn active_head_accepts_only_2_seconded_per_validator() { let validators = vec![ @@ -699,12 +722,14 @@ fn circulated_statement_goes_to_all_peers_with_view() { #[test] fn receiving_from_one_sends_to_another_and_to_candidate_backing() { + const PARA_ID: ParaId = ParaId::new(1); let hash_a = Hash::repeat_byte(1); + let pvd = dummy_pvd(); let candidate = { let mut c = dummy_committed_candidate_receipt(dummy_hash()); c.descriptor.relay_parent = hash_a; - c.descriptor.para_id = 1.into(); + c.descriptor.para_id = PARA_ID; c }; @@ -845,18 +870,32 @@ fn receiving_from_one_sends_to_another_and_to_candidate_backing() { }) .await; + let statement_with_pvd = extend_statement_with_pvd(statement.clone(), pvd.clone()); + + assert_matches!( + handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + hash, + RuntimeApiRequest::PersistedValidationData(para_id, assumption, tx), + )) if para_id == PARA_ID && + assumption == OccupiedCoreAssumption::Free && + hash == hash_a => + { + tx.send(Ok(Some(pvd))).unwrap(); + } + ); + assert_matches!( handle.recv().await, AllMessages::NetworkBridge( NetworkBridgeMessage::ReportPeer(p, r) ) if p == peer_a && r == BENEFIT_VALID_STATEMENT_FIRST => {} ); - assert_matches!( handle.recv().await, AllMessages::CandidateBacking( CandidateBackingMessage::Statement(r, s) - ) if r == hash_a && s == statement => {} + ) if r == hash_a && s == statement_with_pvd => {} ); assert_matches!( @@ -885,6 +924,9 @@ fn receiving_from_one_sends_to_another_and_to_candidate_backing() { #[test] fn receiving_large_statement_from_one_sends_to_another_and_to_candidate_backing() { + const PARA_ID: ParaId = 
ParaId::new(1); + let pvd = dummy_pvd(); + sp_tracing::try_init_simple(); let hash_a = Hash::repeat_byte(1); let hash_b = Hash::repeat_byte(2); @@ -892,7 +934,7 @@ fn receiving_large_statement_from_one_sends_to_another_and_to_candidate_backing( let candidate = { let mut c = dummy_committed_candidate_receipt(dummy_hash()); c.descriptor.relay_parent = hash_a; - c.descriptor.para_id = 1.into(); + c.descriptor.para_id = PARA_ID; c.commitments.new_validation_code = Some(ValidationCode(vec![1, 2, 3])); c }; @@ -1274,6 +1316,20 @@ fn receiving_large_statement_from_one_sends_to_another_and_to_candidate_backing( ) if p == peer_c && r == BENEFIT_VALID_RESPONSE => {} ); + let statement_with_pvd = extend_statement_with_pvd(statement.clone(), pvd.clone()); + + assert_matches!( + handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + hash, + RuntimeApiRequest::PersistedValidationData(para_id, assumption, tx), + )) if para_id == PARA_ID && + assumption == OccupiedCoreAssumption::Free && + hash == hash_a => + { + tx.send(Ok(Some(pvd))).unwrap(); + } + ); assert_matches!( handle.recv().await, AllMessages::NetworkBridge( @@ -1285,7 +1341,7 @@ fn receiving_large_statement_from_one_sends_to_another_and_to_candidate_backing( handle.recv().await, AllMessages::CandidateBacking( CandidateBackingMessage::Statement(r, s) - ) if r == hash_a && s == statement => {} + ) if r == hash_a && s == statement_with_pvd => {} ); // Now messages should go out: @@ -1887,6 +1943,7 @@ fn peer_cant_flood_with_large_statements() { #[test] fn handle_multiple_seconded_statements() { let relay_parent_hash = Hash::repeat_byte(1); + let pvd = dummy_pvd(); let candidate = dummy_committed_candidate_receipt(relay_parent_hash); let candidate_hash = candidate.hash(); @@ -2086,6 +2143,18 @@ fn handle_multiple_seconded_statements() { }) .await; + let statement_with_pvd = extend_statement_with_pvd(statement.clone(), pvd.clone()); + + assert_matches!( + handle.recv().await, + 
AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::PersistedValidationData(_, assumption, tx), + )) if assumption == OccupiedCoreAssumption::Free => { + tx.send(Ok(Some(pvd.clone()))).unwrap(); + } + ); + assert_matches!( handle.recv().await, AllMessages::NetworkBridge( @@ -2103,7 +2172,7 @@ fn handle_multiple_seconded_statements() { CandidateBackingMessage::Statement(r, s) ) => { assert_eq!(r, relay_parent_hash); - assert_eq!(s, statement); + assert_eq!(s, statement_with_pvd); } ); @@ -2189,6 +2258,10 @@ fn handle_multiple_seconded_statements() { }) .await; + let statement_with_pvd = extend_statement_with_pvd(statement.clone(), pvd.clone()); + + // Persisted validation data is cached. + assert_matches!( handle.recv().await, AllMessages::NetworkBridge( @@ -2205,7 +2278,7 @@ fn handle_multiple_seconded_statements() { CandidateBackingMessage::Statement(r, s) ) => { assert_eq!(r, relay_parent_hash); - assert_eq!(s, statement); + assert_eq!(s, statement_with_pvd); } ); From 45deab2bcca2f3a262532bf07d4117d949f16ba8 Mon Sep 17 00:00:00 2001 From: Chris Sosnin Date: Sat, 11 Jun 2022 00:45:47 +0300 Subject: [PATCH 43/54] Backing tests with prospective paras #1 --- .../backing/src/{tests.rs => tests/mod.rs} | 8 +- .../src/tests/prospective_parachains.rs | 334 ++++++++++++++++++ node/subsystem-types/src/messages.rs | 2 +- 3 files changed, 339 insertions(+), 5 deletions(-) rename node/core/backing/src/{tests.rs => tests/mod.rs} (99%) create mode 100644 node/core/backing/src/tests/prospective_parachains.rs diff --git a/node/core/backing/src/tests.rs b/node/core/backing/src/tests/mod.rs similarity index 99% rename from node/core/backing/src/tests.rs rename to node/core/backing/src/tests/mod.rs index 8cd505223b56..0f0a319f5bf6 100644 --- a/node/core/backing/src/tests.rs +++ b/node/core/backing/src/tests/mod.rs @@ -42,9 +42,9 @@ use sp_tracing as _; use statement_table::v2::Misbehavior; use std::collections::HashMap; -const 
API_VERSION_PROSPECTIVE_DISABLED: u32 = 2; +mod prospective_parachains; -const _API_VERSION_PROSPECTIVE_ENABLED: u32 = 3; +const API_VERSION_PROSPECTIVE_DISABLED: u32 = 2; fn validator_pubkeys(val_ids: &[Sr25519Keyring]) -> Vec { val_ids.iter().map(|v| v.public().into()).collect() @@ -61,9 +61,9 @@ fn table_statement_to_primitive(statement: TableStatement) -> Statement { fn dummy_pvd() -> PersistedValidationData { PersistedValidationData { parent_head: HeadData(vec![7, 8, 9]), - relay_parent_number: 5, + relay_parent_number: 0_u32.into(), max_pov_size: 1024, - relay_parent_storage_root: Default::default(), + relay_parent_storage_root: dummy_hash(), } } diff --git a/node/core/backing/src/tests/prospective_parachains.rs b/node/core/backing/src/tests/prospective_parachains.rs new file mode 100644 index 000000000000..493a0bd84a1c --- /dev/null +++ b/node/core/backing/src/tests/prospective_parachains.rs @@ -0,0 +1,334 @@ +// Copyright 2022 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Tests for the backing subsystem with enabled prospective parachains. 
+ +use polkadot_node_subsystem::messages::ChainApiMessage; +use polkadot_primitives::v2::{BlockNumber, Header}; + +use super::*; + +const API_VERSION_PROSPECTIVE_ENABLED: u32 = 3; + +struct TestLeaf { + activated: ActivatedLeaf, + min_relay_parents: Vec<(ParaId, u32)>, +} + +fn get_parent_hash(hash: Hash) -> Hash { + Hash::from_low_u64_be(hash.to_low_u64_be() + 1) +} + +async fn activate_leaf( + virtual_overseer: &mut VirtualOverseer, + leaf: TestLeaf, + test_state: &TestState, +) { + let TestLeaf { activated, min_relay_parents } = leaf; + let leaf_hash = activated.hash; + let leaf_number = activated.number; + // Start work on some new parent. + virtual_overseer + .send(FromOverseer::Signal(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work( + activated, + )))) + .await; + + // Prospective parachains mode is temporarily defined by the Runtime API version. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::Version(tx)) + ) if parent == leaf_hash => { + tx.send(Ok(API_VERSION_PROSPECTIVE_ENABLED)).unwrap(); + } + ); + + let min_min = *min_relay_parents + .iter() + .map(|(_, block_num)| block_num) + .min() + .unwrap_or(&leaf_number); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::GetMinimumRelayParents(parent, tx) + ) if parent == leaf_hash => { + tx.send(min_relay_parents).unwrap(); + } + ); + + let ancestry_len = leaf_number + 1 - min_min; + + let ancestry_hashes = std::iter::successors(Some(leaf_hash), |h| Some(get_parent_hash(*h))) + .take(ancestry_len as usize); + let ancestry_numbers = (min_min..=leaf_number).rev(); + let mut ancestry_iter = ancestry_hashes.clone().zip(ancestry_numbers).peekable(); + + loop { + let (hash, number) = match ancestry_iter.next() { + Some((hash, number)) => (hash, number), + None => break, + }; + + // May be `None` for the last element. 
+ let parent_hash = + ancestry_iter.peek().map(|(h, _)| *h).unwrap_or_else(|| get_parent_hash(hash)); + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ChainApi( + ChainApiMessage::BlockHeader(_hash, tx) + ) if _hash == hash => { + let header = Header { + parent_hash, + number, + state_root: Hash::zero(), + extrinsics_root: Hash::zero(), + digest: Default::default(), + }; + + tx.send(Ok(Some(header))).unwrap(); + } + ); + } + + for hash in ancestry_hashes { + // Check that subsystem job issues a request for a validator set. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::Validators(tx)) + ) if parent == hash => { + tx.send(Ok(test_state.validator_public.clone())).unwrap(); + } + ); + + // Check that subsystem job issues a request for the validator groups. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::ValidatorGroups(tx)) + ) if parent == hash => { + tx.send(Ok(test_state.validator_groups.clone())).unwrap(); + } + ); + + // Check that subsystem job issues a request for the session index for child. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::SessionIndexForChild(tx)) + ) if parent == hash => { + tx.send(Ok(test_state.signing_context.session_index)).unwrap(); + } + ); + + // Check that subsystem job issues a request for the availability cores. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::AvailabilityCores(tx)) + ) if parent == hash => { + tx.send(Ok(test_state.availability_cores.clone())).unwrap(); + } + ); + } +} + +// Test that `seconding_sanity_check` works when a candidate is allowed +// for all leaves. 
+#[test] +fn seconding_sanity_check_allowed() { + let test_state = TestState::default(); + test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { + // Candidate is seconded in a parent of the activated `leaf_a`. + const LEAF_A_BLOCK_NUMBER: BlockNumber = 100; + const LEAF_A_DEPTH: BlockNumber = 3; + let para_id = test_state.chain_ids[0]; + + let leaf_a_hash = Hash::from_low_u64_be(128); + let leaf_a_parent = get_parent_hash(leaf_a_hash); + let activated = ActivatedLeaf { + hash: leaf_a_hash, + number: LEAF_A_BLOCK_NUMBER, + status: LeafStatus::Fresh, + span: Arc::new(jaeger::Span::Disabled), + }; + let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_DEPTH)]; + let test_leaf_a = TestLeaf { activated, min_relay_parents }; + + const LEAF_B_BLOCK_NUMBER: BlockNumber = 99; + const LEAF_B_DEPTH: BlockNumber = 2; + + let leaf_b_hash = Hash::from_low_u64_be(256); + let activated = ActivatedLeaf { + hash: leaf_b_hash, + number: LEAF_B_BLOCK_NUMBER, + status: LeafStatus::Fresh, + span: Arc::new(jaeger::Span::Disabled), + }; + let min_relay_parents = vec![(para_id, LEAF_B_BLOCK_NUMBER - LEAF_B_DEPTH)]; + let test_leaf_b = TestLeaf { activated, min_relay_parents }; + + activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await; + // activate_leaf(&mut virtual_overseer, test_leaf_b, &test_state).await; + + let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); + + let expected_head_data = test_state.head_data.get(¶_id).unwrap(); + + let pov_hash = pov.hash(); + let candidate = TestCandidateBuilder { + para_id, + relay_parent: leaf_a_parent, + pov_hash, + head_data: expected_head_data.clone(), + erasure_root: make_erasure_root(&test_state, pov.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), + ..Default::default() + } + .build(); + + let second = CandidateBackingMessage::Second( + leaf_a_hash, + 
candidate.to_plain(), + pvd.clone(), + pov.clone(), + ); + + virtual_overseer.send(FromOverseer::Communication { msg: second }).await; + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::ValidationCodeByHash(hash, tx)) + ) if parent == leaf_a_parent && hash == validation_code.hash() => { + tx.send(Ok(Some(validation_code.clone()))).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CandidateValidation( + CandidateValidationMessage::ValidateFromExhaustive( + _pvd, + _validation_code, + candidate_receipt, + _pov, + timeout, + tx, + ), + ) if _pvd == pvd && + _validation_code == validation_code && + *_pov == pov && &candidate_receipt.descriptor == candidate.descriptor() && + timeout == BACKING_EXECUTION_TIMEOUT && + candidate.commitments.hash() == candidate_receipt.commitments_hash => + { + tx.send(Ok(ValidationResult::Valid( + CandidateCommitments { + head_data: expected_head_data.clone(), + horizontal_messages: Vec::new(), + upward_messages: Vec::new(), + new_validation_code: None, + processed_downward_messages: 0, + hrmp_watermark: 0, + }, + test_state.validation_data.clone(), + ))) + .unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::AvailabilityStore( + AvailabilityStoreMessage::StoreAvailableData { candidate_hash, tx, .. 
} + ) if candidate_hash == candidate.hash() => { + tx.send(Ok(())).unwrap(); + } + ); + + // `seconding_sanity_check` + let expected_request = HypotheticalDepthRequest { + candidate_hash: candidate.hash(), + candidate_para: para_id, + parent_head_data_hash: pvd.parent_head.hash(), + candidate_relay_parent: leaf_a_parent, + fragment_tree_relay_parent: leaf_a_hash, + }; + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains(ProspectiveParachainsMessage::GetHypotheticalDepth( + request, + tx, + )) if request == expected_request => { + tx.send(vec![0, 1, 2, 3]).unwrap(); + } + ); + // Prospective parachains are notified. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::CandidateSeconded( + candidate_para, + candidate_receipt, + _pvd, + tx, + ), + ) if candidate_receipt == candidate && candidate_para == para_id && pvd == _pvd => { + // Any non-empty response will do. + tx.send(vec![(leaf_a_hash, vec![0, 1, 2, 3])]).unwrap(); + } + ); + + test_dispute_coordinator_notifications( + &mut virtual_overseer, + candidate.hash(), + test_state.session(), + vec![ValidatorIndex(0)], + ) + .await; + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::StatementDistribution( + StatementDistributionMessage::Share( + parent_hash, + _signed_statement, + ) + ) if parent_hash == leaf_a_parent => {} + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CollatorProtocol(CollatorProtocolMessage::Seconded(hash, statement)) => { + assert_eq!(leaf_a_parent, hash); + assert_matches!(statement.payload(), Statement::Seconded(_)); + } + ); + + virtual_overseer + .send(FromOverseer::Signal(OverseerSignal::ActiveLeaves( + ActiveLeavesUpdate::stop_work(leaf_a_hash), + ))) + .await; + virtual_overseer + }); +} diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index c673f679b7fd..db54ddd7be0b 100644 --- 
a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -942,7 +942,7 @@ pub enum PvfCheckerMessage {} /// A request for the depths a hypothetical candidate would occupy within /// some fragment tree. -#[derive(Debug)] +#[derive(Debug, PartialEq, Eq)] pub struct HypotheticalDepthRequest { /// The hash of the potential candidate. pub candidate_hash: CandidateHash, From 59e31b43c430dc2580bc22d83b6c4ea552357207 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 31 May 2022 14:24:22 -0500 Subject: [PATCH 44/54] fix per_relay_parent pruning in backing --- node/core/backing/src/lib.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index a15d091eb497..614d13313ecb 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -837,8 +837,9 @@ async fn handle_active_leaves_update( // // when prospective parachains are disabled, the implicit view is empty, // which means we'll clean up everything. This is correct. 
- for relay_parent in state.implicit_view.all_allowed_relay_parents() { - state.per_relay_parent.remove(relay_parent); + { + let remaining: HashSet<_> = state.implicit_view.all_allowed_relay_parents().collect(); + state.per_relay_parent.retain(|r, _| remainig.contains(&r)); } // clean up `per_candidate` according to which relay-parents From 23b4b3ca28aade99378c33b1aa913f8c15ae2945 Mon Sep 17 00:00:00 2001 From: Chris Sosnin Date: Sat, 11 Jun 2022 22:29:13 +0300 Subject: [PATCH 45/54] Test multiple leaves --- node/core/backing/src/lib.rs | 2 +- .../src/tests/prospective_parachains.rs | 86 +++++++++++++++---- 2 files changed, 69 insertions(+), 19 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 614d13313ecb..2dddf57ac8d6 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -839,7 +839,7 @@ async fn handle_active_leaves_update( // which means we'll clean up everything. This is correct. { let remaining: HashSet<_> = state.implicit_view.all_allowed_relay_parents().collect(); - state.per_relay_parent.retain(|r, _| remainig.contains(&r)); + state.per_relay_parent.retain(|r, _| remaining.contains(&r)); } // clean up `per_candidate` according to which relay-parents diff --git a/node/core/backing/src/tests/prospective_parachains.rs b/node/core/backing/src/tests/prospective_parachains.rs index 493a0bd84a1c..b1be58542c11 100644 --- a/node/core/backing/src/tests/prospective_parachains.rs +++ b/node/core/backing/src/tests/prospective_parachains.rs @@ -79,6 +79,9 @@ async fn activate_leaf( let ancestry_numbers = (min_min..=leaf_number).rev(); let mut ancestry_iter = ancestry_hashes.clone().zip(ancestry_numbers).peekable(); + let mut next_overseer_message = None; + // How many blocks were actually requested. + let mut requested_len = 0; loop { let (hash, number) = match ancestry_iter.next() { Some((hash, number)) => (hash, number), @@ -88,8 +91,17 @@ async fn activate_leaf( // May be `None` for the last element. 
let parent_hash = ancestry_iter.peek().map(|(h, _)| *h).unwrap_or_else(|| get_parent_hash(hash)); + + let msg = virtual_overseer.recv().await; + // It may happen that some blocks were cached by implicit view, + // reuse the message. + if !matches!(&msg, AllMessages::ChainApi(ChainApiMessage::BlockHeader(..))) { + next_overseer_message.replace(msg); + break + } + assert_matches!( - virtual_overseer.recv().await, + msg, AllMessages::ChainApi( ChainApiMessage::BlockHeader(_hash, tx) ) if _hash == hash => { @@ -104,12 +116,17 @@ async fn activate_leaf( tx.send(Ok(Some(header))).unwrap(); } ); + requested_len += 1; } - for hash in ancestry_hashes { + for hash in ancestry_hashes.take(requested_len) { // Check that subsystem job issues a request for a validator set. + let msg = match next_overseer_message.take() { + Some(msg) => msg, + None => virtual_overseer.recv().await, + }; assert_matches!( - virtual_overseer.recv().await, + msg, AllMessages::RuntimeApi( RuntimeApiMessage::Request(parent, RuntimeApiRequest::Validators(tx)) ) if parent == hash => { @@ -149,6 +166,35 @@ async fn activate_leaf( } } +async fn assert_hypothetical_depth_requests( + virtual_overseer: &mut VirtualOverseer, + mut expected_requests: Vec<(HypotheticalDepthRequest, Vec)>, +) { + // Requests come with no particular order. + let requests_num = expected_requests.len(); + + for _ in 0..requests_num { + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::GetHypotheticalDepth(request, tx), + ) => { + let idx = match expected_requests.iter().position(|r| r.0 == request) { + Some(idx) => idx, + None => panic!( + "unexpected hypothetical depth request, no match found for {:?}", + request + ), + }; + let resp = std::mem::take(&mut expected_requests[idx].1); + tx.send(resp).unwrap(); + + expected_requests.remove(idx); + } + ); + } +} + // Test that `seconding_sanity_check` works when a candidate is allowed // for all leaves. 
#[test] @@ -160,7 +206,9 @@ fn seconding_sanity_check_allowed() { const LEAF_A_DEPTH: BlockNumber = 3; let para_id = test_state.chain_ids[0]; - let leaf_a_hash = Hash::from_low_u64_be(128); + let leaf_b_hash = Hash::from_low_u64_be(128); + // `a` is grandparent of `b`. + let leaf_a_hash = Hash::from_low_u64_be(130); let leaf_a_parent = get_parent_hash(leaf_a_hash); let activated = ActivatedLeaf { hash: leaf_a_hash, @@ -171,10 +219,9 @@ fn seconding_sanity_check_allowed() { let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_DEPTH)]; let test_leaf_a = TestLeaf { activated, min_relay_parents }; - const LEAF_B_BLOCK_NUMBER: BlockNumber = 99; - const LEAF_B_DEPTH: BlockNumber = 2; + const LEAF_B_BLOCK_NUMBER: BlockNumber = LEAF_A_BLOCK_NUMBER + 2; + const LEAF_B_DEPTH: BlockNumber = 4; - let leaf_b_hash = Hash::from_low_u64_be(256); let activated = ActivatedLeaf { hash: leaf_b_hash, number: LEAF_B_BLOCK_NUMBER, @@ -185,7 +232,7 @@ fn seconding_sanity_check_allowed() { let test_leaf_b = TestLeaf { activated, min_relay_parents }; activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await; - // activate_leaf(&mut virtual_overseer, test_leaf_b, &test_state).await; + activate_leaf(&mut virtual_overseer, test_leaf_b, &test_state).await; let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; let pvd = dummy_pvd(); @@ -266,22 +313,25 @@ fn seconding_sanity_check_allowed() { ); // `seconding_sanity_check` - let expected_request = HypotheticalDepthRequest { + let expected_request_a = HypotheticalDepthRequest { candidate_hash: candidate.hash(), candidate_para: para_id, parent_head_data_hash: pvd.parent_head.hash(), candidate_relay_parent: leaf_a_parent, fragment_tree_relay_parent: leaf_a_hash, }; - assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains(ProspectiveParachainsMessage::GetHypotheticalDepth( - request, - tx, - )) if request == expected_request => { - tx.send(vec![0, 1, 2, 3]).unwrap(); - } - ); + let 
expected_request_b = HypotheticalDepthRequest { + candidate_hash: candidate.hash(), + candidate_para: para_id, + parent_head_data_hash: pvd.parent_head.hash(), + candidate_relay_parent: leaf_a_parent, + fragment_tree_relay_parent: leaf_b_hash, + }; + assert_hypothetical_depth_requests( + &mut virtual_overseer, + vec![(expected_request_a, vec![0, 1, 2, 3]), (expected_request_b, vec![3])], + ) + .await; // Prospective parachains are notified. assert_matches!( virtual_overseer.recv().await, From 48d53cd085401a835189e0d899177289368f2275 Mon Sep 17 00:00:00 2001 From: Chris Sosnin Date: Sun, 12 Jun 2022 00:30:07 +0300 Subject: [PATCH 46/54] Test seconding sanity check --- .../src/tests/prospective_parachains.rs | 339 +++++++++++++++--- 1 file changed, 286 insertions(+), 53 deletions(-) diff --git a/node/core/backing/src/tests/prospective_parachains.rs b/node/core/backing/src/tests/prospective_parachains.rs index b1be58542c11..44eeb7d42598 100644 --- a/node/core/backing/src/tests/prospective_parachains.rs +++ b/node/core/backing/src/tests/prospective_parachains.rs @@ -16,7 +16,7 @@ //! Tests for the backing subsystem with enabled prospective parachains. 
-use polkadot_node_subsystem::messages::ChainApiMessage; +use polkadot_node_subsystem::{messages::ChainApiMessage, TimeoutExt}; use polkadot_primitives::v2::{BlockNumber, Header}; use super::*; @@ -36,6 +36,7 @@ async fn activate_leaf( virtual_overseer: &mut VirtualOverseer, leaf: TestLeaf, test_state: &TestState, + already_seconded: usize, ) { let TestLeaf { activated, min_relay_parents } = leaf; let leaf_hash = activated.hash; @@ -119,6 +120,21 @@ async fn activate_leaf( requested_len += 1; } + for _ in 0..already_seconded { + let msg = match next_overseer_message.take() { + Some(msg) => msg, + None => virtual_overseer.recv().await, + }; + assert_matches!( + msg, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::GetTreeMembership(.., tx), + ) => { + tx.send(Vec::new()).unwrap(); + } + ); + } + for hash in ancestry_hashes.take(requested_len) { // Check that subsystem job issues a request for a validator set. let msg = match next_overseer_message.take() { @@ -166,6 +182,64 @@ async fn activate_leaf( } } +async fn assert_validate_seconded_candidate( + virtual_overseer: &mut VirtualOverseer, + relay_parent: Hash, + candidate: &CommittedCandidateReceipt, + pov: &PoV, + pvd: &PersistedValidationData, + validation_code: &ValidationCode, + expected_head_data: &HeadData, +) { + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::ValidationCodeByHash(hash, tx)) + ) if parent == relay_parent && hash == validation_code.hash() => { + tx.send(Ok(Some(validation_code.clone()))).unwrap(); + } + ); + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CandidateValidation(CandidateValidationMessage::ValidateFromExhaustive( + _pvd, + _validation_code, + candidate_receipt, + _pov, + timeout, + tx, + )) if &_pvd == pvd && + &_validation_code == validation_code && + &*_pov == pov && + &candidate_receipt.descriptor == candidate.descriptor() && + timeout == 
BACKING_EXECUTION_TIMEOUT && + candidate.commitments.hash() == candidate_receipt.commitments_hash => + { + tx.send(Ok(ValidationResult::Valid( + CandidateCommitments { + head_data: expected_head_data.clone(), + horizontal_messages: Vec::new(), + upward_messages: Vec::new(), + new_validation_code: None, + processed_downward_messages: 0, + hrmp_watermark: 0, + }, + pvd.clone(), + ))) + .unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::AvailabilityStore( + AvailabilityStoreMessage::StoreAvailableData { candidate_hash, tx, .. } + ) if candidate_hash == candidate.hash() => { + tx.send(Ok(())).unwrap(); + } + ); +} + async fn assert_hypothetical_depth_requests( virtual_overseer: &mut VirtualOverseer, mut expected_requests: Vec<(HypotheticalDepthRequest, Vec)>, @@ -231,8 +305,8 @@ fn seconding_sanity_check_allowed() { let min_relay_parents = vec![(para_id, LEAF_B_BLOCK_NUMBER - LEAF_B_DEPTH)]; let test_leaf_b = TestLeaf { activated, min_relay_parents }; - activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await; - activate_leaf(&mut virtual_overseer, test_leaf_b, &test_state).await; + activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await; + activate_leaf(&mut virtual_overseer, test_leaf_b, &test_state, 0).await; let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; let pvd = dummy_pvd(); @@ -262,56 +336,160 @@ fn seconding_sanity_check_allowed() { virtual_overseer.send(FromOverseer::Communication { msg: second }).await; - assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::ValidationCodeByHash(hash, tx)) - ) if parent == leaf_a_parent && hash == validation_code.hash() => { - tx.send(Ok(Some(validation_code.clone()))).unwrap(); - } - ); + assert_validate_seconded_candidate( + &mut virtual_overseer, + leaf_a_parent, + &candidate, + &pov, + &pvd, + &validation_code, + expected_head_data, + ) + .await; + // 
`seconding_sanity_check` + let expected_request_a = HypotheticalDepthRequest { + candidate_hash: candidate.hash(), + candidate_para: para_id, + parent_head_data_hash: pvd.parent_head.hash(), + candidate_relay_parent: leaf_a_parent, + fragment_tree_relay_parent: leaf_a_hash, + }; + let expected_request_b = HypotheticalDepthRequest { + candidate_hash: candidate.hash(), + candidate_para: para_id, + parent_head_data_hash: pvd.parent_head.hash(), + candidate_relay_parent: leaf_a_parent, + fragment_tree_relay_parent: leaf_b_hash, + }; + assert_hypothetical_depth_requests( + &mut virtual_overseer, + vec![(expected_request_a, vec![0, 1, 2, 3]), (expected_request_b, vec![3])], + ) + .await; + // Prospective parachains are notified. assert_matches!( virtual_overseer.recv().await, - AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive( - _pvd, - _validation_code, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::CandidateSeconded( + candidate_para, candidate_receipt, - _pov, - timeout, + _pvd, tx, ), - ) if _pvd == pvd && - _validation_code == validation_code && - *_pov == pov && &candidate_receipt.descriptor == candidate.descriptor() && - timeout == BACKING_EXECUTION_TIMEOUT && - candidate.commitments.hash() == candidate_receipt.commitments_hash => - { - tx.send(Ok(ValidationResult::Valid( - CandidateCommitments { - head_data: expected_head_data.clone(), - horizontal_messages: Vec::new(), - upward_messages: Vec::new(), - new_validation_code: None, - processed_downward_messages: 0, - hrmp_watermark: 0, - }, - test_state.validation_data.clone(), - ))) - .unwrap(); + ) if candidate_receipt == candidate && candidate_para == para_id && pvd == _pvd => { + // Any non-empty response will do. 
+ tx.send(vec![(leaf_a_hash, vec![0, 1, 2, 3])]).unwrap(); } ); + test_dispute_coordinator_notifications( + &mut virtual_overseer, + candidate.hash(), + test_state.session(), + vec![ValidatorIndex(0)], + ) + .await; + assert_matches!( virtual_overseer.recv().await, - AllMessages::AvailabilityStore( - AvailabilityStoreMessage::StoreAvailableData { candidate_hash, tx, .. } - ) if candidate_hash == candidate.hash() => { - tx.send(Ok(())).unwrap(); + AllMessages::StatementDistribution( + StatementDistributionMessage::Share( + parent_hash, + _signed_statement, + ) + ) if parent_hash == leaf_a_parent => {} + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CollatorProtocol(CollatorProtocolMessage::Seconded(hash, statement)) => { + assert_eq!(leaf_a_parent, hash); + assert_matches!(statement.payload(), Statement::Seconded(_)); } ); + virtual_overseer + }); +} + +// Test that `seconding_sanity_check` works when a candidate is disallowed +// for a at least one leaf. +#[test] +fn seconding_sanity_check_disallowed() { + let test_state = TestState::default(); + test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { + // Candidate is seconded in a parent of the activated `leaf_a`. + const LEAF_A_BLOCK_NUMBER: BlockNumber = 100; + const LEAF_A_DEPTH: BlockNumber = 3; + let para_id = test_state.chain_ids[0]; + + let leaf_b_hash = Hash::from_low_u64_be(128); + // `a` is grandparent of `b`. 
+ let leaf_a_hash = Hash::from_low_u64_be(130); + let leaf_a_parent = get_parent_hash(leaf_a_hash); + let activated = ActivatedLeaf { + hash: leaf_a_hash, + number: LEAF_A_BLOCK_NUMBER, + status: LeafStatus::Fresh, + span: Arc::new(jaeger::Span::Disabled), + }; + let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_DEPTH)]; + let test_leaf_a = TestLeaf { activated, min_relay_parents }; + + const LEAF_B_BLOCK_NUMBER: BlockNumber = LEAF_A_BLOCK_NUMBER + 2; + const LEAF_B_DEPTH: BlockNumber = 4; + + let activated = ActivatedLeaf { + hash: leaf_b_hash, + number: LEAF_B_BLOCK_NUMBER, + status: LeafStatus::Fresh, + span: Arc::new(jaeger::Span::Disabled), + }; + let min_relay_parents = vec![(para_id, LEAF_B_BLOCK_NUMBER - LEAF_B_DEPTH)]; + let test_leaf_b = TestLeaf { activated, min_relay_parents }; + + activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await; + + let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); + + let expected_head_data = test_state.head_data.get(¶_id).unwrap(); + + let pov_hash = pov.hash(); + let candidate = TestCandidateBuilder { + para_id, + relay_parent: leaf_a_parent, + pov_hash, + head_data: expected_head_data.clone(), + erasure_root: make_erasure_root(&test_state, pov.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), + ..Default::default() + } + .build(); + + let second = CandidateBackingMessage::Second( + leaf_a_hash, + candidate.to_plain(), + pvd.clone(), + pov.clone(), + ); + + virtual_overseer.send(FromOverseer::Communication { msg: second }).await; + + assert_validate_seconded_candidate( + &mut virtual_overseer, + leaf_a_parent, + &candidate, + &pov, + &pvd, + &validation_code, + expected_head_data, + ) + .await; + // `seconding_sanity_check` let expected_request_a = HypotheticalDepthRequest { candidate_hash: candidate.hash(), @@ -320,16 +498,9 @@ fn 
seconding_sanity_check_allowed() { candidate_relay_parent: leaf_a_parent, fragment_tree_relay_parent: leaf_a_hash, }; - let expected_request_b = HypotheticalDepthRequest { - candidate_hash: candidate.hash(), - candidate_para: para_id, - parent_head_data_hash: pvd.parent_head.hash(), - candidate_relay_parent: leaf_a_parent, - fragment_tree_relay_parent: leaf_b_hash, - }; assert_hypothetical_depth_requests( &mut virtual_overseer, - vec![(expected_request_a, vec![0, 1, 2, 3]), (expected_request_b, vec![3])], + vec![(expected_request_a, vec![0, 1, 2, 3])], ) .await; // Prospective parachains are notified. @@ -344,7 +515,7 @@ fn seconding_sanity_check_allowed() { ), ) if candidate_receipt == candidate && candidate_para == para_id && pvd == _pvd => { // Any non-empty response will do. - tx.send(vec![(leaf_a_hash, vec![0, 1, 2, 3])]).unwrap(); + tx.send(vec![(leaf_a_hash, vec![0, 2, 3])]).unwrap(); } ); @@ -374,11 +545,73 @@ fn seconding_sanity_check_allowed() { } ); - virtual_overseer - .send(FromOverseer::Signal(OverseerSignal::ActiveLeaves( - ActiveLeavesUpdate::stop_work(leaf_a_hash), - ))) - .await; + // A seconded candidate occupies a depth, try to second another one. + // It is allowed in a new leaf but not allowed in the old one. + // Expect it to be rejected. 
+ activate_leaf(&mut virtual_overseer, test_leaf_b, &test_state, 1).await; + let leaf_a_grandparent = get_parent_hash(leaf_a_parent); + let candidate = TestCandidateBuilder { + para_id, + relay_parent: leaf_a_grandparent, + pov_hash, + head_data: expected_head_data.clone(), + erasure_root: make_erasure_root(&test_state, pov.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), + ..Default::default() + } + .build(); + + let second = CandidateBackingMessage::Second( + leaf_a_hash, + candidate.to_plain(), + pvd.clone(), + pov.clone(), + ); + + virtual_overseer.send(FromOverseer::Communication { msg: second }).await; + + assert_validate_seconded_candidate( + &mut virtual_overseer, + leaf_a_grandparent, + &candidate, + &pov, + &pvd, + &validation_code, + expected_head_data, + ) + .await; + + // `seconding_sanity_check` + let expected_request_a = HypotheticalDepthRequest { + candidate_hash: candidate.hash(), + candidate_para: para_id, + parent_head_data_hash: pvd.parent_head.hash(), + candidate_relay_parent: leaf_a_grandparent, + fragment_tree_relay_parent: leaf_a_hash, + }; + let expected_request_b = HypotheticalDepthRequest { + candidate_hash: candidate.hash(), + candidate_para: para_id, + parent_head_data_hash: pvd.parent_head.hash(), + candidate_relay_parent: leaf_a_grandparent, + fragment_tree_relay_parent: leaf_b_hash, + }; + assert_hypothetical_depth_requests( + &mut virtual_overseer, + vec![ + (expected_request_a, vec![3]), // All depths are occupied. 
+ (expected_request_b, vec![1]), + ], + ) + .await; + + assert!(virtual_overseer + .recv() + .timeout(std::time::Duration::from_millis(50)) + .await + .is_none()); + virtual_overseer }); } From 13e09624e620dfe754cc5e02335f3cd17b795717 Mon Sep 17 00:00:00 2001 From: Chris Sosnin Date: Mon, 13 Jun 2022 16:01:49 +0300 Subject: [PATCH 47/54] Import statement order Before creating an entry in `PerCandidateState` map wait for the approval from the prospective parachains --- node/core/backing/src/lib.rs | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 2dddf57ac8d6..6bb4eee8b20e 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -1531,17 +1531,6 @@ async fn import_statement( // our active leaves. if let StatementWithPVD::Seconded(candidate, pvd) = statement.payload() { if !per_candidate.contains_key(&candidate_hash) { - per_candidate.insert( - candidate_hash, - PerCandidateState { - persisted_validation_data: pvd.clone(), - // This is set after importing when seconding locally. - seconded_locally: false, - para_id: candidate.descriptor().para_id, - relay_parent: candidate.descriptor().relay_parent, - }, - ); - if rp_state.prospective_parachains_mode.is_enabled() { let (tx, rx) = oneshot::channel(); ctx.send_message(ProspectiveParachainsMessage::CandidateSeconded( @@ -1567,6 +1556,18 @@ async fn import_statement( }, } } + + // Only save the candidate if it was approved by prospective parachains. + per_candidate.insert( + candidate_hash, + PerCandidateState { + persisted_validation_data: pvd.clone(), + // This is set after importing when seconding locally. 
+ seconded_locally: false, + para_id: candidate.descriptor().para_id, + relay_parent: candidate.descriptor().relay_parent, + }, + ); } } From b8b6ca0f1cbb00412680a6e4dbda0921f45416d7 Mon Sep 17 00:00:00 2001 From: Chris Sosnin Date: Mon, 13 Jun 2022 16:03:34 +0300 Subject: [PATCH 48/54] Add a test for correct state updates --- .../src/tests/prospective_parachains.rs | 178 +++++++++++++++++- node/subsystem-types/src/messages.rs | 2 +- 2 files changed, 176 insertions(+), 4 deletions(-) diff --git a/node/core/backing/src/tests/prospective_parachains.rs b/node/core/backing/src/tests/prospective_parachains.rs index 44eeb7d42598..b75e4152224d 100644 --- a/node/core/backing/src/tests/prospective_parachains.rs +++ b/node/core/backing/src/tests/prospective_parachains.rs @@ -36,7 +36,7 @@ async fn activate_leaf( virtual_overseer: &mut VirtualOverseer, leaf: TestLeaf, test_state: &TestState, - already_seconded: usize, + seconded_in_view: usize, ) { let TestLeaf { activated, min_relay_parents } = leaf; let leaf_hash = activated.hash; @@ -120,7 +120,7 @@ async fn activate_leaf( requested_len += 1; } - for _ in 0..already_seconded { + for _ in 0..seconded_in_view { let msg = match next_overseer_message.take() { Some(msg) => msg, None => virtual_overseer.recv().await, @@ -414,7 +414,7 @@ fn seconding_sanity_check_allowed() { } // Test that `seconding_sanity_check` works when a candidate is disallowed -// for a at least one leaf. +// for at least one leaf. #[test] fn seconding_sanity_check_disallowed() { let test_state = TestState::default(); @@ -615,3 +615,175 @@ fn seconding_sanity_check_disallowed() { virtual_overseer }); } + +// Test that a seconded candidate which is not approved by prospective parachains +// subsystem doesn't change the view. 
+#[test] +fn prospective_parachains_reject_candidate() { + let test_state = TestState::default(); + test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { + // Candidate is seconded in a parent of the activated `leaf_a`. + const LEAF_A_BLOCK_NUMBER: BlockNumber = 100; + const LEAF_A_DEPTH: BlockNumber = 3; + let para_id = test_state.chain_ids[0]; + + let leaf_a_hash = Hash::from_low_u64_be(130); + let leaf_a_parent = get_parent_hash(leaf_a_hash); + let activated = ActivatedLeaf { + hash: leaf_a_hash, + number: LEAF_A_BLOCK_NUMBER, + status: LeafStatus::Fresh, + span: Arc::new(jaeger::Span::Disabled), + }; + let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_DEPTH)]; + let test_leaf_a = TestLeaf { activated, min_relay_parents }; + + activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await; + + let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); + + let expected_head_data = test_state.head_data.get(¶_id).unwrap(); + + let pov_hash = pov.hash(); + let candidate = TestCandidateBuilder { + para_id, + relay_parent: leaf_a_parent, + pov_hash, + head_data: expected_head_data.clone(), + erasure_root: make_erasure_root(&test_state, pov.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), + ..Default::default() + } + .build(); + + let second = CandidateBackingMessage::Second( + leaf_a_hash, + candidate.to_plain(), + pvd.clone(), + pov.clone(), + ); + + virtual_overseer.send(FromOverseer::Communication { msg: second }).await; + + assert_validate_seconded_candidate( + &mut virtual_overseer, + leaf_a_parent, + &candidate, + &pov, + &pvd, + &validation_code, + expected_head_data, + ) + .await; + + // `seconding_sanity_check` + let expected_request_a = vec![( + HypotheticalDepthRequest { + candidate_hash: candidate.hash(), + candidate_para: para_id, + parent_head_data_hash: 
pvd.parent_head.hash(), + candidate_relay_parent: leaf_a_parent, + fragment_tree_relay_parent: leaf_a_hash, + }, + vec![0, 1, 2, 3], + )]; + assert_hypothetical_depth_requests(&mut virtual_overseer, expected_request_a.clone()).await; + + // Prospective parachains are notified. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::CandidateSeconded( + candidate_para, + candidate_receipt, + _pvd, + tx, + ), + ) if candidate_receipt == candidate && candidate_para == para_id && pvd == _pvd => { + // Reject it. + tx.send(Vec::new()).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CollatorProtocol(CollatorProtocolMessage::Invalid( + relay_parent, + candidate_receipt, + )) if candidate_receipt.descriptor() == candidate.descriptor() && + candidate_receipt.commitments_hash == candidate.commitments.hash() && + relay_parent == leaf_a_parent + ); + + // Try seconding the same candidate. + + let second = CandidateBackingMessage::Second( + leaf_a_hash, + candidate.to_plain(), + pvd.clone(), + pov.clone(), + ); + + virtual_overseer.send(FromOverseer::Communication { msg: second }).await; + + assert_validate_seconded_candidate( + &mut virtual_overseer, + leaf_a_parent, + &candidate, + &pov, + &pvd, + &validation_code, + expected_head_data, + ) + .await; + + // `seconding_sanity_check` + assert_hypothetical_depth_requests(&mut virtual_overseer, expected_request_a).await; + // Prospective parachains are notified. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::CandidateSeconded( + candidate_para, + candidate_receipt, + _pvd, + tx, + ), + ) if candidate_receipt == candidate && candidate_para == para_id && pvd == _pvd => { + // Any non-empty response will do. 
+ tx.send(vec![(leaf_a_hash, vec![0, 2, 3])]).unwrap(); + } + ); + + test_dispute_coordinator_notifications( + &mut virtual_overseer, + candidate.hash(), + test_state.session(), + vec![ValidatorIndex(0)], + ) + .await; + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::StatementDistribution( + StatementDistributionMessage::Share( + parent_hash, + _signed_statement, + ) + ) if parent_hash == leaf_a_parent => {} + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CollatorProtocol(CollatorProtocolMessage::Seconded(hash, statement)) => { + assert_eq!(leaf_a_parent, hash); + assert_matches!(statement.payload(), Statement::Seconded(_)); + } + ); + + virtual_overseer + }); +} diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index db54ddd7be0b..9652cff20ba7 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -942,7 +942,7 @@ pub enum PvfCheckerMessage {} /// A request for the depths a hypothetical candidate would occupy within /// some fragment tree. -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, PartialEq, Eq, Clone, Copy)] pub struct HypotheticalDepthRequest { /// The hash of the potential candidate. 
pub candidate_hash: CandidateHash, From b601bbc97f7e96cb8064f3f7a1d0e71750189e4c Mon Sep 17 00:00:00 2001 From: Chris Sosnin Date: Mon, 13 Jun 2022 16:18:35 +0300 Subject: [PATCH 49/54] Second multiple candidates per relay parent test --- node/core/backing/src/tests/mod.rs | 2 +- .../src/tests/prospective_parachains.rs | 131 ++++++++++++++++++ 2 files changed, 132 insertions(+), 1 deletion(-) diff --git a/node/core/backing/src/tests/mod.rs b/node/core/backing/src/tests/mod.rs index 0f0a319f5bf6..e6b02ae0e3e2 100644 --- a/node/core/backing/src/tests/mod.rs +++ b/node/core/backing/src/tests/mod.rs @@ -197,7 +197,7 @@ fn make_erasure_root(test: &TestState, pov: PoV) -> Hash { erasure_coding::branches(&chunks).root() } -#[derive(Default)] +#[derive(Default, Clone)] struct TestCandidateBuilder { para_id: ParaId, head_data: HeadData, diff --git a/node/core/backing/src/tests/prospective_parachains.rs b/node/core/backing/src/tests/prospective_parachains.rs index b75e4152224d..60de5f292365 100644 --- a/node/core/backing/src/tests/prospective_parachains.rs +++ b/node/core/backing/src/tests/prospective_parachains.rs @@ -787,3 +787,134 @@ fn prospective_parachains_reject_candidate() { virtual_overseer }); } + +// Test that a validator can second multiple candidates per single relay parent. +#[test] +fn second_multiple_candidates_per_relay_parent() { + let test_state = TestState::default(); + test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { + // Candidate `a` is seconded in a parent of the activated `leaf`. 
+ const LEAF_BLOCK_NUMBER: BlockNumber = 100; + const LEAF_DEPTH: BlockNumber = 3; + let para_id = test_state.chain_ids[0]; + + let leaf_hash = Hash::from_low_u64_be(130); + let leaf_parent = get_parent_hash(leaf_hash); + let leaf_grandparent = get_parent_hash(leaf_parent); + let activated = ActivatedLeaf { + hash: leaf_hash, + number: LEAF_BLOCK_NUMBER, + status: LeafStatus::Fresh, + span: Arc::new(jaeger::Span::Disabled), + }; + let min_relay_parents = vec![(para_id, LEAF_BLOCK_NUMBER - LEAF_DEPTH)]; + let test_leaf_a = TestLeaf { activated, min_relay_parents }; + + activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await; + + let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); + + let expected_head_data = test_state.head_data.get(¶_id).unwrap(); + + let pov_hash = pov.hash(); + let candidate_a = TestCandidateBuilder { + para_id, + relay_parent: leaf_parent, + pov_hash, + head_data: expected_head_data.clone(), + erasure_root: make_erasure_root(&test_state, pov.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), + ..Default::default() + }; + let mut candidate_b = candidate_a.clone(); + candidate_b.relay_parent = leaf_grandparent; + + // With depths. 
+ let candidate_a = (candidate_a.build(), 1); + let candidate_b = (candidate_b.build(), 2); + + for candidate in &[candidate_a, candidate_b] { + let (candidate, depth) = candidate; + let second = CandidateBackingMessage::Second( + leaf_hash, + candidate.to_plain(), + pvd.clone(), + pov.clone(), + ); + + virtual_overseer.send(FromOverseer::Communication { msg: second }).await; + + assert_validate_seconded_candidate( + &mut virtual_overseer, + candidate.descriptor().relay_parent, + &candidate, + &pov, + &pvd, + &validation_code, + expected_head_data, + ) + .await; + + // `seconding_sanity_check` + let expected_request_a = vec![( + HypotheticalDepthRequest { + candidate_hash: candidate.hash(), + candidate_para: para_id, + parent_head_data_hash: pvd.parent_head.hash(), + candidate_relay_parent: candidate.descriptor().relay_parent, + fragment_tree_relay_parent: leaf_hash, + }, + vec![*depth], + )]; + assert_hypothetical_depth_requests(&mut virtual_overseer, expected_request_a.clone()) + .await; + + // Prospective parachains are notified. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::CandidateSeconded( + candidate_para, + candidate_receipt, + _pvd, + tx, + ), + ) if &candidate_receipt == candidate && candidate_para == para_id && pvd == _pvd => { + // Any non-empty response will do. 
+ tx.send(vec![(leaf_hash, vec![0, 2, 3])]).unwrap(); + } + ); + + test_dispute_coordinator_notifications( + &mut virtual_overseer, + candidate.hash(), + test_state.session(), + vec![ValidatorIndex(0)], + ) + .await; + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::StatementDistribution( + StatementDistributionMessage::Share( + parent_hash, + _signed_statement, + ) + ) if parent_hash == candidate.descriptor().relay_parent => {} + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CollatorProtocol(CollatorProtocolMessage::Seconded(hash, statement)) => { + assert_eq!(candidate.descriptor().relay_parent, hash); + assert_matches!(statement.payload(), Statement::Seconded(_)); + } + ); + } + + virtual_overseer + }); +} From 21fb1ecd217884c410b0700af2888c299480216c Mon Sep 17 00:00:00 2001 From: Chris Sosnin Date: Tue, 14 Jun 2022 20:00:54 +0300 Subject: [PATCH 50/54] Add backing tests with prospective paras --- .../src/tests/prospective_parachains.rs | 202 ++++++++++++++++++ 1 file changed, 202 insertions(+) diff --git a/node/core/backing/src/tests/prospective_parachains.rs b/node/core/backing/src/tests/prospective_parachains.rs index 60de5f292365..93af21d82f79 100644 --- a/node/core/backing/src/tests/prospective_parachains.rs +++ b/node/core/backing/src/tests/prospective_parachains.rs @@ -190,6 +190,7 @@ async fn assert_validate_seconded_candidate( pvd: &PersistedValidationData, validation_code: &ValidationCode, expected_head_data: &HeadData, + fetch_pov: bool, ) { assert_matches!( virtual_overseer.recv().await, @@ -199,6 +200,22 @@ async fn assert_validate_seconded_candidate( tx.send(Ok(Some(validation_code.clone()))).unwrap(); } ); + + if fetch_pov { + assert_matches!( + virtual_overseer.recv().await, + AllMessages::AvailabilityDistribution( + AvailabilityDistributionMessage::FetchPoV { + relay_parent: hash, + tx, + .. 
+ } + ) if hash == relay_parent => { + tx.send(pov.clone()).unwrap(); + } + ); + } + assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation(CandidateValidationMessage::ValidateFromExhaustive( @@ -344,6 +361,7 @@ fn seconding_sanity_check_allowed() { &pvd, &validation_code, expected_head_data, + false, ) .await; @@ -487,6 +505,7 @@ fn seconding_sanity_check_disallowed() { &pvd, &validation_code, expected_head_data, + false, ) .await; @@ -579,6 +598,7 @@ fn seconding_sanity_check_disallowed() { &pvd, &validation_code, expected_head_data, + false, ) .await; @@ -676,6 +696,7 @@ fn prospective_parachains_reject_candidate() { &pvd, &validation_code, expected_head_data, + false, ) .await; @@ -737,6 +758,7 @@ fn prospective_parachains_reject_candidate() { &pvd, &validation_code, expected_head_data, + false, ) .await; @@ -855,6 +877,7 @@ fn second_multiple_candidates_per_relay_parent() { &pvd, &validation_code, expected_head_data, + false, ) .await; @@ -918,3 +941,182 @@ fn second_multiple_candidates_per_relay_parent() { virtual_overseer }); } + +// Test that the candidate reaches quorum successfully. +#[test] +fn backing_works() { + let test_state = TestState::default(); + test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { + // Candidate `a` is seconded in a parent of the activated `leaf`. 
+ const LEAF_BLOCK_NUMBER: BlockNumber = 100; + const LEAF_DEPTH: BlockNumber = 3; + let para_id = test_state.chain_ids[0]; + + let leaf_hash = Hash::from_low_u64_be(130); + let leaf_parent = get_parent_hash(leaf_hash); + let activated = ActivatedLeaf { + hash: leaf_hash, + number: LEAF_BLOCK_NUMBER, + status: LeafStatus::Fresh, + span: Arc::new(jaeger::Span::Disabled), + }; + let min_relay_parents = vec![(para_id, LEAF_BLOCK_NUMBER - LEAF_DEPTH)]; + let test_leaf_a = TestLeaf { activated, min_relay_parents }; + + activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await; + + let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); + + let expected_head_data = test_state.head_data.get(¶_id).unwrap(); + + let pov_hash = pov.hash(); + + let candidate_a = TestCandidateBuilder { + para_id, + relay_parent: leaf_parent, + pov_hash, + head_data: expected_head_data.clone(), + erasure_root: make_erasure_root(&test_state, pov.clone()), + validation_code: validation_code.0.clone(), + persisted_validation_data_hash: pvd.hash(), + ..Default::default() + } + .build(); + + let candidate_a_hash = candidate_a.hash(); + + let public1 = CryptoStore::sr25519_generate_new( + &*test_state.keystore, + ValidatorId::ID, + Some(&test_state.validators[5].to_seed()), + ) + .await + .expect("Insert key into keystore"); + let public2 = CryptoStore::sr25519_generate_new( + &*test_state.keystore, + ValidatorId::ID, + Some(&test_state.validators[2].to_seed()), + ) + .await + .expect("Insert key into keystore"); + + // Signing context should have a parent hash candidate is based on. 
+ let signing_context = + SigningContext { parent_hash: leaf_parent, session_index: test_state.session() }; + let signed_a = SignedFullStatementWithPVD::sign( + &test_state.keystore, + StatementWithPVD::Seconded(candidate_a.clone(), pvd.clone()), + &signing_context, + ValidatorIndex(2), + &public2.into(), + ) + .await + .ok() + .flatten() + .expect("should be signed"); + + let signed_b = SignedFullStatementWithPVD::sign( + &test_state.keystore, + StatementWithPVD::Valid(candidate_a_hash), + &signing_context, + ValidatorIndex(5), + &public1.into(), + ) + .await + .ok() + .flatten() + .expect("should be signed"); + + let statement = CandidateBackingMessage::Statement(leaf_parent, signed_a.clone()); + + virtual_overseer.send(FromOverseer::Communication { msg: statement }).await; + + // Prospective parachains are notified about candidate seconded first. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::CandidateSeconded( + candidate_para, + candidate_receipt, + _pvd, + tx, + ), + ) if candidate_receipt == candidate_a && candidate_para == para_id && pvd == _pvd => { + // Any non-empty response will do. + tx.send(vec![(leaf_hash, vec![0, 2, 3])]).unwrap(); + } + ); + + test_dispute_coordinator_notifications( + &mut virtual_overseer, + candidate_a_hash, + test_state.session(), + vec![ValidatorIndex(2)], + ) + .await; + + assert_validate_seconded_candidate( + &mut virtual_overseer, + candidate_a.descriptor().relay_parent, + &candidate_a, + &pov, + &pvd, + &validation_code, + expected_head_data, + true, + ) + .await; + + test_dispute_coordinator_notifications( + &mut virtual_overseer, + candidate_a_hash, + test_state.session(), + vec![ValidatorIndex(0)], + ) + .await; + // Prospective parachains are notified about candidate backed. 
+ assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::CandidateBacked( + candidate_para_id, candidate_hash + ), + ) if candidate_a_hash == candidate_hash && candidate_para_id == para_id + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::Provisioner( + ProvisionerMessage::ProvisionableData( + _, + ProvisionableData::BackedCandidate(candidate_receipt) + ) + ) => { + assert_eq!(candidate_receipt, candidate_a.to_plain()); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::StatementDistribution( + StatementDistributionMessage::Share(hash, _stmt) + ) => { + assert_eq!(leaf_parent, hash); + } + ); + + let statement = CandidateBackingMessage::Statement(leaf_parent, signed_b.clone()); + + virtual_overseer.send(FromOverseer::Communication { msg: statement }).await; + test_dispute_coordinator_notifications( + &mut virtual_overseer, + candidate_a_hash, + test_state.session(), + vec![ValidatorIndex(5)], + ) + .await; + virtual_overseer + }); +} From 14da4f9f6b87fd9348b025001e27912b94e989c9 Mon Sep 17 00:00:00 2001 From: Chris Sosnin Date: Tue, 14 Jun 2022 21:39:02 +0300 Subject: [PATCH 51/54] Second more than one test without prospective paras --- node/core/backing/src/tests/mod.rs | 182 ++++++++++++++++++++++++++++- 1 file changed, 181 insertions(+), 1 deletion(-) diff --git a/node/core/backing/src/tests/mod.rs b/node/core/backing/src/tests/mod.rs index e6b02ae0e3e2..277b85ed3b8d 100644 --- a/node/core/backing/src/tests/mod.rs +++ b/node/core/backing/src/tests/mod.rs @@ -28,7 +28,7 @@ use polkadot_node_subsystem::{ AllMessages, CollatorProtocolMessage, RuntimeApiMessage, RuntimeApiRequest, ValidationFailed, }, - ActivatedLeaf, ActiveLeavesUpdate, FromOverseer, LeafStatus, OverseerSignal, + ActivatedLeaf, ActiveLeavesUpdate, FromOverseer, LeafStatus, OverseerSignal, TimeoutExt, }; use polkadot_node_subsystem_test_helpers as test_helpers; use 
polkadot_primitives::v2::{ @@ -2099,3 +2099,183 @@ fn observes_backing_even_if_not_validator() { virtual_overseer }); } + +// Tests that it's impossible to second multiple candidates per relay parent +// without prospective parachains. +#[test] +fn cannot_second_multiple_candidates_per_parent() { + let test_state = TestState::default(); + test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { + test_startup(&mut virtual_overseer, &test_state).await; + + let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); + + let expected_head_data = test_state.head_data.get(&test_state.chain_ids[0]).unwrap(); + + let pov_hash = pov.hash(); + let candidate_builder = TestCandidateBuilder { + para_id: test_state.chain_ids[0], + relay_parent: test_state.relay_parent, + pov_hash, + head_data: expected_head_data.clone(), + erasure_root: make_erasure_root(&test_state, pov.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), + ..Default::default() + }; + let candidate = candidate_builder.clone().build(); + + let second = CandidateBackingMessage::Second( + test_state.relay_parent, + candidate.to_plain(), + pvd.clone(), + pov.clone(), + ); + + virtual_overseer.send(FromOverseer::Communication { msg: second }).await; + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx)) + ) if hash == validation_code.hash() => { + tx.send(Ok(Some(validation_code.clone()))).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CandidateValidation( + CandidateValidationMessage::ValidateFromExhaustive( + _pvd, + _validation_code, + candidate_receipt, + _pov, + timeout, + tx, + ), + ) if _pvd == pvd && + _validation_code == validation_code && + *_pov == pov && &candidate_receipt.descriptor == candidate.descriptor() 
&& + timeout == BACKING_EXECUTION_TIMEOUT && + candidate.commitments.hash() == candidate_receipt.commitments_hash => + { + tx.send(Ok(ValidationResult::Valid( + CandidateCommitments { + head_data: expected_head_data.clone(), + horizontal_messages: Vec::new(), + upward_messages: Vec::new(), + new_validation_code: None, + processed_downward_messages: 0, + hrmp_watermark: 0, + }, + test_state.validation_data.clone(), + ))) + .unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::AvailabilityStore( + AvailabilityStoreMessage::StoreAvailableData { candidate_hash, tx, .. } + ) if candidate_hash == candidate.hash() => { + tx.send(Ok(())).unwrap(); + } + ); + + test_dispute_coordinator_notifications( + &mut virtual_overseer, + candidate.hash(), + test_state.session(), + vec![ValidatorIndex(0)], + ) + .await; + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::StatementDistribution( + StatementDistributionMessage::Share( + parent_hash, + _signed_statement, + ) + ) if parent_hash == test_state.relay_parent => {} + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CollatorProtocol(CollatorProtocolMessage::Seconded(hash, statement)) => { + assert_eq!(test_state.relay_parent, hash); + assert_matches!(statement.payload(), Statement::Seconded(_)); + } + ); + + // Try to second candidate with the same relay parent again. + + // Make sure the candidate hash is different. + let validation_code = ValidationCode(vec![4, 5, 6]); + let mut candidate_builder = candidate_builder; + candidate_builder.validation_code = validation_code.0.clone(); + let candidate = candidate_builder.build(); + + let second = CandidateBackingMessage::Second( + test_state.relay_parent, + candidate.to_plain(), + pvd.clone(), + pov.clone(), + ); + + virtual_overseer.send(FromOverseer::Communication { msg: second }).await; + + // The validation is still requested. 
+ assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx)) + ) if hash == validation_code.hash() => { + tx.send(Ok(Some(validation_code.clone()))).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CandidateValidation( + CandidateValidationMessage::ValidateFromExhaustive(.., tx), + ) => { + tx.send(Ok(ValidationResult::Valid( + CandidateCommitments { + head_data: expected_head_data.clone(), + horizontal_messages: Vec::new(), + upward_messages: Vec::new(), + new_validation_code: None, + processed_downward_messages: 0, + hrmp_watermark: 0, + }, + test_state.validation_data.clone(), + ))) + .unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::AvailabilityStore( + AvailabilityStoreMessage::StoreAvailableData { candidate_hash, tx, .. } + ) if candidate_hash == candidate.hash() => { + tx.send(Ok(())).unwrap(); + } + ); + + // Validation done, but the candidate is rejected because of 0-depth being already occupied.
+ + assert!(virtual_overseer + .recv() + .timeout(std::time::Duration::from_millis(50)) + .await + .is_none()); + + virtual_overseer + }); +} From 505852db061c40ab44a588ee8590e01785892c28 Mon Sep 17 00:00:00 2001 From: Chris Sosnin Date: Wed, 15 Jun 2022 22:49:44 +0300 Subject: [PATCH 52/54] Add a test for prospective para blocks --- node/core/backing/src/tests/mod.rs | 39 +-- .../src/tests/prospective_parachains.rs | 242 +++++++++++++++++- 2 files changed, 257 insertions(+), 24 deletions(-) diff --git a/node/core/backing/src/tests/mod.rs b/node/core/backing/src/tests/mod.rs index 277b85ed3b8d..36d3538572cd 100644 --- a/node/core/backing/src/tests/mod.rs +++ b/node/core/backing/src/tests/mod.rs @@ -189,9 +189,8 @@ fn test_harness>( )); } -fn make_erasure_root(test: &TestState, pov: PoV) -> Hash { - let available_data = - AvailableData { validation_data: test.validation_data.clone(), pov: Arc::new(pov) }; +fn make_erasure_root(test: &TestState, pov: PoV, validation_data: PersistedValidationData) -> Hash { + let available_data = AvailableData { validation_data, pov: Arc::new(pov) }; let chunks = erasure_coding::obtain_chunks_v1(test.validators.len(), &available_data).unwrap(); erasure_coding::branches(&chunks).root() @@ -348,7 +347,7 @@ fn backing_second_works() { relay_parent: test_state.relay_parent, pov_hash, head_data: expected_head_data.clone(), - erasure_root: make_erasure_root(&test_state, pov.clone()), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), persisted_validation_data_hash: pvd.hash(), validation_code: validation_code.0.clone(), ..Default::default() @@ -469,7 +468,7 @@ fn backing_works() { relay_parent: test_state.relay_parent, pov_hash, head_data: expected_head_data.clone(), - erasure_root: make_erasure_root(&test_state, pov.clone()), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), validation_code: validation_code.0.clone(), ..Default::default() } @@ -665,7 +664,7 @@ fn 
backing_works_while_validation_ongoing() { relay_parent: test_state.relay_parent, pov_hash, head_data: expected_head_data.clone(), - erasure_root: make_erasure_root(&test_state, pov.clone()), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), validation_code: validation_code.0.clone(), ..Default::default() } @@ -890,7 +889,7 @@ fn backing_misbehavior_works() { para_id: test_state.chain_ids[0], relay_parent: test_state.relay_parent, pov_hash, - erasure_root: make_erasure_root(&test_state, pov.clone()), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), head_data: expected_head_data.clone(), validation_code: validation_code.0.clone(), ..Default::default() @@ -1119,7 +1118,7 @@ fn backing_dont_second_invalid() { para_id: test_state.chain_ids[0], relay_parent: test_state.relay_parent, pov_hash: pov_hash_a, - erasure_root: make_erasure_root(&test_state, pov_block_a.clone()), + erasure_root: make_erasure_root(&test_state, pov_block_a.clone(), pvd_a.clone()), persisted_validation_data_hash: pvd_a.hash(), validation_code: validation_code_a.0.clone(), ..Default::default() @@ -1130,7 +1129,7 @@ fn backing_dont_second_invalid() { para_id: test_state.chain_ids[0], relay_parent: test_state.relay_parent, pov_hash: pov_hash_b, - erasure_root: make_erasure_root(&test_state, pov_block_b.clone()), + erasure_root: make_erasure_root(&test_state, pov_block_b.clone(), pvd_b.clone()), head_data: expected_head_data.clone(), persisted_validation_data_hash: pvd_b.hash(), validation_code: validation_code_b.0.clone(), @@ -1227,7 +1226,7 @@ fn backing_dont_second_invalid() { new_validation_code: None, processed_downward_messages: 0, hrmp_watermark: 0, - }, test_state.validation_data.clone()), + }, pvd_b.clone()), )).unwrap(); } ); @@ -1288,7 +1287,7 @@ fn backing_second_after_first_fails_works() { para_id: test_state.chain_ids[0], relay_parent: test_state.relay_parent, pov_hash, - erasure_root: make_erasure_root(&test_state, pov.clone()), + 
erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), persisted_validation_data_hash: pvd.hash(), validation_code: validation_code.0.clone(), ..Default::default() @@ -1395,7 +1394,11 @@ fn backing_second_after_first_fails_works() { para_id: test_state.chain_ids[0], relay_parent: test_state.relay_parent, pov_hash, - erasure_root: make_erasure_root(&test_state, pov_to_second.clone()), + erasure_root: make_erasure_root( + &test_state, + pov_to_second.clone(), + pvd_to_second.clone(), + ), persisted_validation_data_hash: pvd_to_second.hash(), validation_code: validation_code_to_second.0.clone(), ..Default::default() @@ -1453,7 +1456,7 @@ fn backing_works_after_failed_validation() { para_id: test_state.chain_ids[0], relay_parent: test_state.relay_parent, pov_hash, - erasure_root: make_erasure_root(&test_state, pov.clone()), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), validation_code: validation_code.0.clone(), ..Default::default() } @@ -1578,7 +1581,7 @@ fn backing_doesnt_second_wrong_collator() { relay_parent: test_state.relay_parent, pov_hash, head_data: expected_head_data.clone(), - erasure_root: make_erasure_root(&test_state, pov.clone()), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), persisted_validation_data_hash: pvd.hash(), validation_code: validation_code.0.clone(), ..Default::default() @@ -1636,7 +1639,7 @@ fn validation_work_ignores_wrong_collator() { relay_parent: test_state.relay_parent, pov_hash, head_data: expected_head_data.clone(), - erasure_root: make_erasure_root(&test_state, pov.clone()), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), persisted_validation_data_hash: pvd.hash(), validation_code: validation_code.0.clone(), ..Default::default() @@ -1766,7 +1769,7 @@ fn retry_works() { para_id: test_state.chain_ids[0], relay_parent: test_state.relay_parent, pov_hash, - erasure_root: make_erasure_root(&test_state, pov.clone()), + erasure_root: 
make_erasure_root(&test_state, pov.clone(), pvd.clone()), persisted_validation_data_hash: pvd.hash(), validation_code: validation_code.0.clone(), ..Default::default() @@ -1980,7 +1983,7 @@ fn observes_backing_even_if_not_validator() { relay_parent: test_state.relay_parent, pov_hash, head_data: expected_head_data.clone(), - erasure_root: make_erasure_root(&test_state, pov.clone()), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), persisted_validation_data_hash: pvd.hash(), validation_code: validation_code.0.clone(), ..Default::default() @@ -2120,7 +2123,7 @@ fn cannot_second_multiple_candidates_per_parent() { relay_parent: test_state.relay_parent, pov_hash, head_data: expected_head_data.clone(), - erasure_root: make_erasure_root(&test_state, pov.clone()), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), persisted_validation_data_hash: pvd.hash(), validation_code: validation_code.0.clone(), ..Default::default() diff --git a/node/core/backing/src/tests/prospective_parachains.rs b/node/core/backing/src/tests/prospective_parachains.rs index 93af21d82f79..16c12cac08cd 100644 --- a/node/core/backing/src/tests/prospective_parachains.rs +++ b/node/core/backing/src/tests/prospective_parachains.rs @@ -337,7 +337,7 @@ fn seconding_sanity_check_allowed() { relay_parent: leaf_a_parent, pov_hash, head_data: expected_head_data.clone(), - erasure_root: make_erasure_root(&test_state, pov.clone()), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), persisted_validation_data_hash: pvd.hash(), validation_code: validation_code.0.clone(), ..Default::default() @@ -481,7 +481,7 @@ fn seconding_sanity_check_disallowed() { relay_parent: leaf_a_parent, pov_hash, head_data: expected_head_data.clone(), - erasure_root: make_erasure_root(&test_state, pov.clone()), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), persisted_validation_data_hash: pvd.hash(), validation_code: validation_code.0.clone(), 
..Default::default() @@ -574,7 +574,7 @@ fn seconding_sanity_check_disallowed() { relay_parent: leaf_a_grandparent, pov_hash, head_data: expected_head_data.clone(), - erasure_root: make_erasure_root(&test_state, pov.clone()), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), persisted_validation_data_hash: pvd.hash(), validation_code: validation_code.0.clone(), ..Default::default() @@ -672,7 +672,7 @@ fn prospective_parachains_reject_candidate() { relay_parent: leaf_a_parent, pov_hash, head_data: expected_head_data.clone(), - erasure_root: make_erasure_root(&test_state, pov.clone()), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), persisted_validation_data_hash: pvd.hash(), validation_code: validation_code.0.clone(), ..Default::default() @@ -846,7 +846,7 @@ fn second_multiple_candidates_per_relay_parent() { relay_parent: leaf_parent, pov_hash, head_data: expected_head_data.clone(), - erasure_root: make_erasure_root(&test_state, pov.clone()), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), persisted_validation_data_hash: pvd.hash(), validation_code: validation_code.0.clone(), ..Default::default() @@ -978,7 +978,7 @@ fn backing_works() { relay_parent: leaf_parent, pov_hash, head_data: expected_head_data.clone(), - erasure_root: make_erasure_root(&test_state, pov.clone()), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), validation_code: validation_code.0.clone(), persisted_validation_data_hash: pvd.hash(), ..Default::default() @@ -1120,3 +1120,233 @@ fn backing_works() { virtual_overseer }); } + +// Tests that validators start work on consecutive prospective parachain blocks. +#[test] +fn concurrent_dependent_candidates() { + let test_state = TestState::default(); + test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { + // Candidate `a` is seconded in a grandparent of the activated `leaf`, + // candidate `b` -- in parent. 
+ const LEAF_BLOCK_NUMBER: BlockNumber = 100; + const LEAF_DEPTH: BlockNumber = 3; + let para_id = test_state.chain_ids[0]; + + let leaf_hash = Hash::from_low_u64_be(130); + let leaf_parent = get_parent_hash(leaf_hash); + let leaf_grandparent = get_parent_hash(leaf_parent); + let activated = ActivatedLeaf { + hash: leaf_hash, + number: LEAF_BLOCK_NUMBER, + status: LeafStatus::Fresh, + span: Arc::new(jaeger::Span::Disabled), + }; + let min_relay_parents = vec![(para_id, LEAF_BLOCK_NUMBER - LEAF_DEPTH)]; + let test_leaf_a = TestLeaf { activated, min_relay_parents }; + + activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await; + + let head_data = &[ + HeadData(vec![10, 20, 30]), // Before `a`. + HeadData(vec![11, 21, 31]), // After `a`. + HeadData(vec![12, 22]), // After `b`. + ]; + + let pov_a = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd_a = PersistedValidationData { + parent_head: head_data[0].clone(), + relay_parent_number: LEAF_BLOCK_NUMBER - 2, + relay_parent_storage_root: Hash::zero(), + max_pov_size: 1024, + }; + + let pov_b = PoV { block_data: BlockData(vec![22, 14, 100]) }; + let pvd_b = PersistedValidationData { + parent_head: head_data[1].clone(), + relay_parent_number: LEAF_BLOCK_NUMBER - 1, + relay_parent_storage_root: Hash::zero(), + max_pov_size: 1024, + }; + let validation_code = ValidationCode(vec![1, 2, 3]); + + let candidate_a = TestCandidateBuilder { + para_id, + relay_parent: leaf_grandparent, + pov_hash: pov_a.hash(), + head_data: head_data[1].clone(), + erasure_root: make_erasure_root(&test_state, pov_a.clone(), pvd_a.clone()), + persisted_validation_data_hash: pvd_a.hash(), + validation_code: validation_code.0.clone(), + ..Default::default() + } + .build(); + let candidate_b = TestCandidateBuilder { + para_id, + relay_parent: leaf_parent, + pov_hash: pov_b.hash(), + head_data: head_data[2].clone(), + erasure_root: make_erasure_root(&test_state, pov_b.clone(), pvd_b.clone()), + persisted_validation_data_hash: 
pvd_b.hash(), + validation_code: validation_code.0.clone(), + ..Default::default() + } + .build(); + let candidate_a_hash = candidate_a.hash(); + let candidate_b_hash = candidate_b.hash(); + + let public1 = CryptoStore::sr25519_generate_new( + &*test_state.keystore, + ValidatorId::ID, + Some(&test_state.validators[5].to_seed()), + ) + .await + .expect("Insert key into keystore"); + let public2 = CryptoStore::sr25519_generate_new( + &*test_state.keystore, + ValidatorId::ID, + Some(&test_state.validators[2].to_seed()), + ) + .await + .expect("Insert key into keystore"); + + // Signing context should have a parent hash candidate is based on. + let signing_context = + SigningContext { parent_hash: leaf_grandparent, session_index: test_state.session() }; + let signed_a = SignedFullStatementWithPVD::sign( + &test_state.keystore, + StatementWithPVD::Seconded(candidate_a.clone(), pvd_a.clone()), + &signing_context, + ValidatorIndex(2), + &public2.into(), + ) + .await + .ok() + .flatten() + .expect("should be signed"); + + let signing_context = + SigningContext { parent_hash: leaf_parent, session_index: test_state.session() }; + let signed_b = SignedFullStatementWithPVD::sign( + &test_state.keystore, + StatementWithPVD::Seconded(candidate_b.clone(), pvd_b.clone()), + &signing_context, + ValidatorIndex(5), + &public1.into(), + ) + .await + .ok() + .flatten() + .expect("should be signed"); + + let statement_a = CandidateBackingMessage::Statement(leaf_grandparent, signed_a.clone()); + let statement_b = CandidateBackingMessage::Statement(leaf_parent, signed_b.clone()); + + virtual_overseer.send(FromOverseer::Communication { msg: statement_a }).await; + // At this point the subsystem waits for response, the previous message is received, + // send a second one without blocking. 
+ let _ = virtual_overseer + .tx + .start_send_unpin(FromOverseer::Communication { msg: statement_b }); + + let mut valid_statements = HashSet::new(); + + loop { + let msg = virtual_overseer + .recv() + .timeout(std::time::Duration::from_secs(1)) + .await + .expect("overseer recv timed out"); + + // Order is not guaranteed since we have 2 statements being handled concurrently. + match msg { + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::CandidateSeconded(.., tx), + ) => { + tx.send(vec![(leaf_hash, vec![0, 2, 3])]).unwrap(); + }, + AllMessages::DisputeCoordinator(DisputeCoordinatorMessage::ImportStatements { + .. + }) => {}, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::ValidationCodeByHash(_, tx), + )) => { + tx.send(Ok(Some(validation_code.clone()))).unwrap(); + }, + AllMessages::AvailabilityDistribution( + AvailabilityDistributionMessage::FetchPoV { candidate_hash, tx, .. }, + ) => { + let pov = if candidate_hash == candidate_a_hash { + &pov_a + } else if candidate_hash == candidate_b_hash { + &pov_b + } else { + panic!("unknown candidate hash") + }; + tx.send(pov.clone()).unwrap(); + }, + AllMessages::CandidateValidation( + CandidateValidationMessage::ValidateFromExhaustive(.., candidate, _, _, tx), + ) => { + let candidate_hash = candidate.hash(); + let (head_data, pvd) = if candidate_hash == candidate_a_hash { + (&head_data[1], &pvd_a) + } else if candidate_hash == candidate_b_hash { + (&head_data[2], &pvd_b) + } else { + panic!("unknown candidate hash") + }; + tx.send(Ok(ValidationResult::Valid( + CandidateCommitments { + head_data: head_data.clone(), + horizontal_messages: Vec::new(), + upward_messages: Vec::new(), + new_validation_code: None, + processed_downward_messages: 0, + hrmp_watermark: 0, + }, + pvd.clone(), + ))) + .unwrap(); + }, + AllMessages::AvailabilityStore(AvailabilityStoreMessage::StoreAvailableData { + tx, + .. 
+ }) => { + tx.send(Ok(())).unwrap(); + }, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::CandidateBacked(..), + ) => {}, + AllMessages::Provisioner(ProvisionerMessage::ProvisionableData(..)) => {}, + AllMessages::StatementDistribution(StatementDistributionMessage::Share( + _, + statement, + )) => { + assert_eq!(statement.validator_index(), ValidatorIndex(0)); + let payload = statement.payload(); + assert_matches!( + payload.clone(), + Statement::Valid(hash) + if hash == candidate_a_hash || hash == candidate_b_hash => + { + assert!(valid_statements.insert(hash)); + } + ); + + if valid_statements.len() == 2 { + break + } + }, + _ => panic!("unexpected message received from overseer: {:?}", msg), + } + } + + assert!( + valid_statements.contains(&candidate_a_hash) && + valid_statements.contains(&candidate_b_hash) + ); + + virtual_overseer + }); +} From cb21c13664112c7f8ed78ad1a50f44ed05ce7628 Mon Sep 17 00:00:00 2001 From: Chris Sosnin Date: Wed, 22 Jun 2022 15:34:33 +0300 Subject: [PATCH 53/54] Update malus --- .../src/variants/suggest_garbage_candidate.rs | 47 ++++++++++--------- 1 file changed, 25 insertions(+), 22 deletions(-) diff --git a/node/malus/src/variants/suggest_garbage_candidate.rs b/node/malus/src/variants/suggest_garbage_candidate.rs index ef987278decb..01272910ba39 100644 --- a/node/malus/src/variants/suggest_garbage_candidate.rs +++ b/node/malus/src/variants/suggest_garbage_candidate.rs @@ -30,7 +30,6 @@ use polkadot_cli::{ ProvideRuntimeApi, SpawnNamed, }, }; -use polkadot_node_core_candidate_validation::find_validation_data; use polkadot_node_primitives::{AvailableData, BlockData, PoV}; use polkadot_primitives::v2::{CandidateDescriptor, CandidateHash}; @@ -84,7 +83,13 @@ where ) -> Option> { match msg { FromOverseer::Communication { - msg: CandidateBackingMessage::Second(relay_parent, candidate, _pov), + msg: + CandidateBackingMessage::Second( + relay_parent, + candidate, + persisted_validation_data, + _pov, + ), } => { 
gum::debug!( target: MALUS, @@ -99,7 +104,7 @@ where let mut new_sender = subsystem_sender.clone(); let _candidate = candidate.clone(); self.spawner.spawn_blocking( - "malus-get-validation-data", + "malus-get-n-validators", Some("malus"), Box::pin(async move { gum::trace!(target: MALUS, "Requesting validators"); @@ -110,25 +115,16 @@ where .unwrap() .len(); gum::trace!(target: MALUS, "Validators {}", n_validators); - match find_validation_data(&mut new_sender, &_candidate.descriptor()).await - { - Ok(Some((validation_data, validation_code))) => { - sender - .send((validation_data, validation_code, n_validators)) - .expect("channel is still open"); - }, - _ => { - panic!("Unable to fetch validation data"); - }, - } + sender.send(n_validators).expect("channel is still open"); }), ); - let (validation_data, validation_code, n_validators) = receiver.recv().unwrap(); + let n_validators = receiver.recv().unwrap(); - let validation_data_hash = validation_data.hash(); - let validation_code_hash = validation_code.hash(); - let validation_data_relay_parent_number = validation_data.relay_parent_number; + let validation_data_hash = persisted_validation_data.hash(); + let validation_code_hash = candidate.descriptor.validation_code_hash; + let validation_data_relay_parent_number = + persisted_validation_data.relay_parent_number; gum::trace!( target: MALUS, @@ -138,11 +134,13 @@ where ?validation_data_hash, ?validation_code_hash, ?validation_data_relay_parent_number, - "Fetched validation data." 
+ "Fetched current validators set" ); - let malicious_available_data = - AvailableData { pov: Arc::new(pov.clone()), validation_data }; + let malicious_available_data = AvailableData { + pov: Arc::new(pov.clone()), + validation_data: persisted_validation_data.clone(), + }; let pov_hash = pov.hash(); let erasure_root = { @@ -205,7 +203,12 @@ where .insert(malicious_candidate_hash, candidate.hash()); let message = FromOverseer::Communication { - msg: CandidateBackingMessage::Second(relay_parent, malicious_candidate, pov), + msg: CandidateBackingMessage::Second( + relay_parent, + malicious_candidate, + persisted_validation_data, + pov, + ), }; Some(message) From d8314175ae8e3b9f52a20e393b8bca61a19dca4a Mon Sep 17 00:00:00 2001 From: Chris Sosnin Date: Wed, 22 Jun 2022 16:03:17 +0300 Subject: [PATCH 54/54] typos --- node/core/backing/src/lib.rs | 4 ++-- node/core/prospective-parachains/src/lib.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 6bb4eee8b20e..66d1453e9e99 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -48,7 +48,7 @@ //! most recent blocks in the relay-chain (which is in fact a tree) which could be built //! upon. Depth is always measured against active leaves, and the valid relay-parent that //! each candidate can have is determined by the active leaves. The Prospective Parachains -//! subsystem enforces that the relay-parent increases monotonoically, so that logic +//! subsystem enforces that the relay-parent increases monotonically, so that logic //! is not handled here. By communicating with the Prospective Parachains subsystem, //! this subsystem extrapolates an "implicit view" from the set of currently active leaves, //! which determines the set of all recent relay-chain block hashes which could be relay-parents @@ -248,7 +248,7 @@ struct ActiveLeafState { /// depth under every active leaf has an empty entry in this map. 
/// /// When prospective parachains are disabled, the only depth - /// which is allowed is '0'. + /// which is allowed is 0. seconded_at_depth: BTreeMap, } diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index 1043d29c9bee..f3d437308210 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -337,7 +337,7 @@ async fn handle_candidate_backed( target: LOG_TARGET, para_id = ?para, ?candidate_hash, - "Received instructio to back candidate", + "Received instruction to back candidate", ); return Ok(())