diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 85ccb96f693..9a6c314a38b 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3659,6 +3659,40 @@ impl BeaconChain { .await } + /// Checks if the provided execution proof(s) can make any cached blocks available/importable. + /// Otherwise caches the proof(s) in the data availability checker. + /// + /// Note: Unlike blobs/data columns, execution proofs do not carry a signed header because + /// they arrive after the block has been made. So we do not perform any slashing-related checks for proofs. + pub async fn check_gossip_execution_proof_availability_and_import< + O: crate::observed_data_sidecars::ObservationStrategy, + >( + self: &Arc, + block_root: Hash256, + execution_proofs: impl IntoIterator< + Item = crate::execution_proof_verification::GossipVerifiedExecutionProof, + >, + ) -> Result { + let availability = self + .data_availability_checker + .put_gossip_verified_execution_proofs(block_root, execution_proofs)?; + + // Note: the slot is only used for logging, default to 0 for now. The proof type may change + // in the future making the block's slot easily available. + let slot = self + .data_availability_checker + .get_cached_block(&block_root) + .map(|b| match b { + BlockProcessStatus::Unknown => Slot::new(0), + BlockProcessStatus::NotValidated(block, _) + | BlockProcessStatus::ExecutionValidated(block) => block.slot(), + }) + .unwrap_or(Slot::new(0)); + + self.process_availability(slot, availability, || Ok(())) + .await + } + fn check_columns_for_slashability<'a>( self: &Arc, block_root: Hash256, diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 5564c7916fa..76a94319ba3 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -942,6 +942,13 @@ where }; debug!(?custody_context, "Loading persisted custody context"); + // Extract execution proof requirements before moving chain_config + let min_execution_proofs_required = if self.chain_config.stateless_validation { + Some(self.chain_config.stateless_min_proofs_required) + } else { + None + }; + let beacon_chain = BeaconChain { spec: self.spec.clone(), config: self.chain_config, @@ -1019,6 +1026,7 @@ where store, custody_context, self.spec, + min_execution_proofs_required, ) .map_err(|e| format!("Error initializing DataAvailabilityChecker: {:?}", e))?, ), diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index a7defa9fa2a..8adaa07d46c 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -81,6 +81,34 @@ pub struct ChainConfig { pub prepare_payload_lookahead: Duration, /// Use EL-free optimistic sync for the finalized part of the chain. pub optimistic_finalized_sync: bool, + /// Enable stateless validation mode for new payloads. + /// + /// Currently this means that the node will accept blocks optimistically + /// and maintain metadata about which blocks have been proven and which ones have not. + pub stateless_validation: bool, + /// Generate execution proofs for all blocks received. + /// + /// Nodes that have this enabled will be used to bootstrap proofs into the subnets, + /// whether they are a proposer or not. + pub generate_execution_proofs: bool, + /// Maximum number of execution proof subnets this node will participate in. 
+    ///
+    /// This is a per-node configuration that must not exceed the protocol maximum
+    /// (MAX_EXECUTION_PROOF_SUBNETS). Nodes may choose to participate in fewer
+    /// subnets to reduce resource usage, but this limits the number of proofs they
+    /// can generate or validate.
+    ///
+    /// TODO: We could replace the sequential allocation with a random allocation, so
+    /// TODO: that lower-numbered subnets are not privileged. The current strategy is
+    /// TODO: mostly a POC.
+    ///
+    /// Note: stateless_min_proofs_required must not exceed this value, as a node
+    /// cannot require more proofs than the number of subnets it participates in.
+    pub max_execution_proof_subnets: u64,
+    /// Minimum number of proofs required to consider a block valid in stateless mode.
+    ///
+    /// Must be between 1 and max_execution_proof_subnets. Higher values provide
+    /// more security but may increase block validation latency.
+    pub stateless_min_proofs_required: usize,
     /// The size of the shuffling cache,
     pub shuffling_cache_size: usize,
     /// If using a weak-subjectivity sync, whether we should download blocks all the way back to
@@ -144,6 +172,10 @@ impl Default for ChainConfig {
             prepare_payload_lookahead: Duration::from_secs(4),
             // This value isn't actually read except in tests.
             optimistic_finalized_sync: true,
+            stateless_validation: false,
+            generate_execution_proofs: false,
+            max_execution_proof_subnets: 8,
+            stateless_min_proofs_required: 1,
             shuffling_cache_size: crate::shuffling_cache::DEFAULT_CACHE_SIZE,
             genesis_backfill: false,
             complete_blob_backfill: false,
diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs
index 43b7d8f7ea3..8923a4c777f 100644
--- a/beacon_node/beacon_chain/src/data_availability_checker.rs
+++ b/beacon_node/beacon_chain/src/data_availability_checker.rs
@@ -22,7 +22,7 @@ use tracing::{debug, error, instrument};
 use types::blob_sidecar::{BlobIdentifier, BlobSidecar, FixedBlobSidecarList};
 use types::{
     BlobSidecarList, BlockImportSource, ChainSpec, DataColumnSidecar, DataColumnSidecarList, Epoch,
-    EthSpec, Hash256, SignedBeaconBlock, Slot,
+    EthSpec, ExecutionProof, Hash256, SignedBeaconBlock, Slot,
 };
 
 mod error;
@@ -100,8 +100,8 @@ pub enum DataColumnReconstructionResult {
 
 /// This type is returned after adding a block / blob to the `DataAvailabilityChecker`.
 ///
-/// Indicates if the block is fully `Available` or if we need blobs or blocks
-/// to "complete" the requirements for an `AvailableBlock`.
+/// Indicates if the block is ready for import or if we need blobs, columns, or execution proofs
+/// to "complete" the requirements for an importable block.
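+///
+/// For illustration, a caller typically matches on the result along these lines
+/// (a sketch; the real call sites live in `beacon_chain.rs`):
+///
+/// ```ignore
+/// match availability {
+///     // Cache the partial components and wait for more gossip/RPC data.
+///     Availability::MissingComponents(block_root) => { /* ... */ }
+///     // Everything (blobs/columns/proofs) is present: import into fork choice.
+///     Availability::Available(block) => { /* ... */ }
+/// }
+/// ```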
pub enum Availability { MissingComponents(Hash256), Available(Box>), @@ -113,7 +113,9 @@ impl Debug for Availability { Self::MissingComponents(block_root) => { write!(f, "MissingComponents({})", block_root) } - Self::Available(block) => write!(f, "Available({:?})", block.import_data.block_root), + Self::Available(block) => { + write!(f, "ReadyForImport({:?})", block.import_data.block_root) + } } } } @@ -126,12 +128,14 @@ impl DataAvailabilityChecker { store: BeaconStore, custody_context: Arc>, spec: Arc, + min_execution_proofs_required: Option, ) -> Result { let inner = DataAvailabilityCheckerInner::new( OVERFLOW_LRU_CAPACITY, store, custody_context.clone(), spec.clone(), + min_execution_proofs_required, )?; Ok(Self { complete_blob_backfill, @@ -210,6 +214,26 @@ impl DataAvailabilityChecker { self.availability_cache.peek_data_columns(block_root) } + /// Put gossip verified execution proofs into the availability cache. + /// + /// This allows stateless validation nodes to collect proofs for execution payloads. + pub fn put_gossip_verified_execution_proofs( + &self, + block_root: Hash256, + execution_proofs: I, + ) -> Result, AvailabilityCheckError> + where + I: IntoIterator< + Item = crate::execution_proof_verification::GossipVerifiedExecutionProof, + >, + O: crate::observed_data_sidecars::ObservationStrategy, + { + self.availability_cache.put_execution_proofs( + block_root, + execution_proofs.into_iter().map(|v| v.into_inner()), + ) + } + /// Put a list of blobs received via RPC into the availability cache. This performs KZG /// verification on the blobs in the list. #[instrument(skip_all, level = "trace")] @@ -386,6 +410,7 @@ impl DataAvailabilityChecker { block, blob_data: AvailableBlockData::Blobs(blob_list), blobs_available_timestamp: None, + proof_data: ImportableProofData::NoneRequired, spec: self.spec.clone(), })) } else { @@ -411,6 +436,7 @@ impl DataAvailabilityChecker { .collect(), ), blobs_available_timestamp: None, + proof_data: ImportableProofData::NoneRequired, spec: self.spec.clone(), })) } else { @@ -423,6 +449,7 @@ impl DataAvailabilityChecker { block, blob_data: AvailableBlockData::NoData, blobs_available_timestamp: None, + proof_data: ImportableProofData::NoneRequired, spec: self.spec.clone(), })) } @@ -479,6 +506,7 @@ impl DataAvailabilityChecker { block, blob_data: AvailableBlockData::Blobs(blobs), blobs_available_timestamp: None, + proof_data: ImportableProofData::NoneRequired, spec: self.spec.clone(), }) } else { @@ -493,6 +521,7 @@ impl DataAvailabilityChecker { data_columns.into_iter().map(|d| d.into_inner()).collect(), ), blobs_available_timestamp: None, + proof_data: ImportableProofData::NoneRequired, spec: self.spec.clone(), }) } else { @@ -504,6 +533,7 @@ impl DataAvailabilityChecker { block, blob_data: AvailableBlockData::NoData, blobs_available_timestamp: None, + proof_data: ImportableProofData::NoneRequired, spec: self.spec.clone(), }) }; @@ -527,6 +557,13 @@ impl DataAvailabilityChecker { self.da_check_required_for_epoch(epoch) && self.spec.is_peer_das_enabled_for_epoch(epoch) } + /// Determines the execution proof requirements for an epoch. + /// - If the epoch is from prior to the data availability boundary, no execution proofs are required. + /// - This allows historical blocks to sync without waiting for execution proofs. 
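+    ///
+    /// Illustrative behaviour (a sketch; `checker` and the epochs are assumed values):
+    ///
+    /// ```ignore
+    /// // Within the DA boundary: proofs are required (when the node is configured for them).
+    /// assert!(checker.execution_proofs_required_for_epoch(recent_epoch));
+    /// // Far behind the boundary: historical sync proceeds without proofs.
+    /// assert!(!checker.execution_proofs_required_for_epoch(Epoch::new(0)));
+    /// ```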
+    pub fn execution_proofs_required_for_epoch(&self, epoch: Epoch) -> bool {
+        self.da_check_required_for_epoch(epoch) // Only for recent blocks within DA boundary
+    }
+
     /// See `Self::blobs_required_for_epoch`
     fn blobs_required_for_block(&self, block: &SignedBeaconBlock<T::EthSpec>) -> bool {
         block.num_expected_blobs() > 0 && self.blobs_required_for_epoch(block.epoch())
@@ -754,6 +791,25 @@ pub enum AvailableBlockData<E: EthSpec> {
     DataColumns(DataColumnSidecarList<E>),
 }
 
+/// Execution proof data
+///
+/// The proofs that were collected and that convinced the node that the
+/// block's execution payload was indeed valid.
+#[derive(Debug, Clone)]
+pub enum ImportableProofData {
+    /// Execution proofs that were used to validate this block.
+    /// The number of proofs is at least `min_proofs_required` because
+    /// that is needed for the block to be seen as available/importable.
+    ///
+    /// TODO: It's possible for us to provide a proof that the
+    /// ExecutionPayload was invalid. Is this useful? It would allow
+    /// us to distinguish an actually-invalid payload from the case where
+    /// the prover simply cannot create a proof for a valid payload.
+    Proofs(Vec<ExecutionProof>),
+    /// This is the case when the node opts not to receive proofs
+    NoneRequired,
+}
+
 /// A fully available block that is ready to be imported into fork choice.
 #[derive(Debug)]
 pub struct AvailableBlock<E: EthSpec> {
@@ -762,6 +818,8 @@ pub struct AvailableBlock<E: EthSpec> {
     blob_data: AvailableBlockData<E>,
     /// Timestamp at which this block first became available (UNIX timestamp, time since 1970).
     blobs_available_timestamp: Option<Duration>,
+    /// Execution proof data for this block
+    pub proof_data: ImportableProofData,
     pub spec: Arc<ChainSpec>,
 }
 
@@ -770,6 +828,7 @@ impl<E: EthSpec> AvailableBlock<E> {
         block_root: Hash256,
         block: Arc<SignedBeaconBlock<E>>,
         data: AvailableBlockData<E>,
+        proof_data: ImportableProofData,
         spec: Arc<ChainSpec>,
     ) -> Self {
         Self {
@@ -777,6 +836,7 @@
             block,
             blob_data: data,
             blobs_available_timestamp: None,
+            proof_data,
             spec,
         }
     }
@@ -828,6 +888,7 @@ impl<E: EthSpec> AvailableBlock<E> {
             }
         },
         blobs_available_timestamp: self.blobs_available_timestamp,
+        proof_data: self.proof_data.clone(),
         spec: self.spec.clone(),
     })
 }
@@ -1112,6 +1173,7 @@ mod test {
             store,
             custody_context,
             spec,
+            None,
         )
         .expect("should initialise data availability checker")
     }
diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs
index 42f6dbd8567..0ca489becf9 100644
--- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs
+++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs
@@ -6,18 +6,21 @@ use crate::blob_verification::KzgVerifiedBlob;
 use crate::block_verification_types::{
     AvailabilityPendingExecutedBlock, AvailableBlock, AvailableExecutedBlock,
 };
-use crate::data_availability_checker::{Availability, AvailabilityCheckError};
+use crate::data_availability_checker::{Availability, AvailabilityCheckError, ImportableProofData};
 use crate::data_column_verification::KzgVerifiedCustodyDataColumn;
+use crate::execution_proof_verification::VerifiedExecutionProof;
 use crate::{BeaconChainTypes, BlockProcessStatus};
 use lighthouse_tracing::SPAN_PENDING_COMPONENTS;
 use lru::LruCache;
 use parking_lot::{MappedRwLockReadGuard, RwLock, RwLockReadGuard, RwLockWriteGuard};
 use std::cmp::Ordering;
+use std::collections::HashMap;
 use std::num::NonZeroUsize;
 use std::sync::Arc;
 use tracing::{Span, debug, debug_span};
 use types::beacon_block_body::KzgCommitments;
 use types::blob_sidecar::BlobIdentifier;
+use types::execution_proof_subnet_id::ExecutionProofSubnetId;
 use types::{
     BlobSidecar, BlockImportSource, ChainSpec, ColumnIndex, DataColumnSidecar,
     DataColumnSidecarList, Epoch, EthSpec, Hash256, RuntimeFixedVector, RuntimeVariableList,
@@ -76,6 +79,8 @@ pub struct PendingComponents<E: EthSpec> {
     pub verified_data_columns: Vec<KzgVerifiedCustodyDataColumn<E>>,
     pub block: Option<DietAvailabilityPendingExecutedBlock<E>>,
     pub reconstruction_started: bool,
+    /// Verified execution proofs indexed by subnet ID
+    pub verified_execution_proofs: HashMap<ExecutionProofSubnetId, VerifiedExecutionProof>,
     span: Span,
 }
 
@@ -199,6 +204,25 @@ impl<E: EthSpec> PendingComponents<E> {
         Ok(())
     }
 
+    /// Merges execution proofs into the cache.
+    /// Only inserts proofs that are not already present.
+    pub fn merge_execution_proofs<I: IntoIterator<Item = VerifiedExecutionProof>>(
+        &mut self,
+        execution_proofs: I,
+    ) {
+        for execution_proof in execution_proofs {
+            let subnet_id = execution_proof.as_proof().subnet_id;
+            self.verified_execution_proofs
+                .entry(subnet_id)
+                .or_insert(execution_proof);
+        }
+    }
+
+    /// Returns the number of execution proofs for this block
+    pub fn execution_proof_count(&self) -> usize {
+        self.verified_execution_proofs.len()
+    }
+
     /// Inserts a new block and revalidates the existing blobs against it.
     ///
     /// Blobs that don't match the new block's commitments are evicted.
@@ -216,6 +240,7 @@
     pub fn make_available<R>(
         &self,
         spec: &Arc<ChainSpec>,
+        min_execution_proofs_required: Option<usize>,
         num_expected_columns_opt: Option<usize>,
         recover: R,
     ) -> Result<Option<AvailableBlock<E>>, AvailabilityCheckError>
@@ -294,6 +319,31 @@
             return Ok(None);
         };
 
+        // Check execution proof requirements
+        let proof_data = if let Some(num_expected_proofs) = min_execution_proofs_required {
+            // TODO(zkproofs): consider waiving the proof requirements during sync
+            let num_received_proofs = self.verified_execution_proofs.len();
+            match num_received_proofs.cmp(&num_expected_proofs) {
+                Ordering::Greater | Ordering::Equal => {
+                    // We have at least as many proofs as required, which is enough to mark
+                    // the block as validated
+                    ImportableProofData::Proofs(
+                        self.verified_execution_proofs
+                            .values()
+                            .map(|vp| vp.clone().into_inner())
+                            .collect(),
+                    )
+                }
+                Ordering::Less => {
+                    // Execution proofs are required but we don't have enough yet
+                    return Ok(None);
+                }
+            }
+        } else {
+            // Node not configured for execution proof requirements
+            ImportableProofData::NoneRequired
+        };
+
         // Block is available, construct `AvailableExecutedBlock`
 
         let blobs_available_timestamp = match blob_data {
@@ -311,14 +361,27 @@
         let AvailabilityPendingExecutedBlock {
             block,
             import_data,
-            payload_verification_outcome,
+            mut payload_verification_outcome,
         } = recover(*block.clone(), &self.span)?;
 
+        // If execution proofs were required and are now present, upgrade the payload
+        // verification status to Verified so fork choice reflects proof-backed validity.
+        match &proof_data {
+            // TODO(zkproofs): we should persist the proofs in the DB if they must be served over
+            // RPC for other nodes to sync.
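+            // Illustrative k-of-n behaviour: with `min_execution_proofs_required = 2`,
+            // proofs on subnets {0, 3} reach this arm, while a single proof keeps the
+            // block in `MissingComponents` via the `Ordering::Less` branch above.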
+ ImportableProofData::Proofs(_) => { + payload_verification_outcome.payload_verification_status = + fork_choice::PayloadVerificationStatus::Verified; + } + ImportableProofData::NoneRequired => {} + } + let available_block = AvailableBlock { block_root: self.block_root, block, blob_data, blobs_available_timestamp, + proof_data, spec: spec.clone(), }; @@ -342,6 +405,7 @@ impl PendingComponents { verified_data_columns: vec![], block: None, reconstruction_started: false, + verified_execution_proofs: HashMap::new(), span, } } @@ -405,6 +469,8 @@ pub struct DataAvailabilityCheckerInner { state_cache: StateLRUCache, custody_context: Arc>, spec: Arc, + /// Minimum execution proofs required for blocks to become available (None = no requirement) + min_execution_proofs_required: Option, } // This enum is only used internally within the crate in the reconstruction function to improve @@ -422,12 +488,14 @@ impl DataAvailabilityCheckerInner { beacon_store: BeaconStore, custody_context: Arc>, spec: Arc, + min_execution_proofs_required: Option, ) -> Result { Ok(Self { critical: RwLock::new(LruCache::new(capacity)), state_cache: StateLRUCache::new(beacon_store, spec.clone()), custody_context, spec, + min_execution_proofs_required, }) } @@ -575,6 +643,83 @@ impl DataAvailabilityCheckerInner { ) } + /// Puts execution proofs into the availability cache as pending components. + pub fn put_execution_proofs>( + &self, + block_root: Hash256, + execution_proofs: I, + ) -> Result, AvailabilityCheckError> { + let mut execution_proofs = execution_proofs.into_iter().peekable(); + + // TODO(zkproofs): Re-write in the style of put_kzg_verified_data_columns once either: + // - We don't need the `epoch` argument for `update_or_insert_pending_components` or + // - The ExecutionProof includes the block slot + + let Some(epoch) = execution_proofs.peek().and_then(|_proof| { + // Try to determine epoch from existing cached block if available + self.critical + .read() + .peek(&block_root) + .and_then(|pending| pending.epoch()) + }) else { + // If no epoch can be determined, use default max_len + let default_max_len = 6; // Default max blobs per block + let mut write_lock = self.critical.write(); + + let mut pending_components = write_lock + .pop_entry(&block_root) + .map(|(_, v)| v) + .unwrap_or_else(|| PendingComponents::empty(block_root, default_max_len)); + + pending_components.merge_execution_proofs(execution_proofs); + + debug!( + component = "execution_proofs", + ?block_root, + proof_count = pending_components.execution_proof_count(), + "Execution proofs added to data availability checker" + ); + + write_lock.put(block_root, pending_components); + return Ok(Availability::MissingComponents(block_root)); + }; + + let mut write_lock = self.critical.write(); + + let mut pending_components = write_lock + .pop_entry(&block_root) + .map(|(_, v)| v) + .unwrap_or_else(|| { + PendingComponents::empty(block_root, self.spec.max_blobs_per_block(epoch) as usize) + }); + + pending_components.merge_execution_proofs(execution_proofs); + + let num_expected_columns = self + .custody_context + .num_of_data_columns_to_sample(epoch, &self.spec); + debug!( + component = "execution_proofs", + ?block_root, + proof_count = pending_components.execution_proof_count(), + "Execution proofs added to data availability checker" + ); + + if let Some(available_block) = pending_components.make_available( + &self.spec, + self.min_execution_proofs_required, + Some(num_expected_columns), + |block, span| self.state_cache.recover_pending_executed_block(block, 
span), + )? { + write_lock.put(block_root, pending_components); + drop(write_lock); + Ok(Availability::Available(Box::new(available_block))) + } else { + write_lock.put(block_root, pending_components); + Ok(Availability::MissingComponents(block_root)) + } + } + fn check_availability_and_cache_components( &self, block_root: Hash256, @@ -583,6 +728,7 @@ impl DataAvailabilityCheckerInner { ) -> Result, AvailabilityCheckError> { if let Some(available_block) = pending_components.make_available( &self.spec, + self.min_execution_proofs_required, num_expected_columns_opt, |block, span| self.state_cache.recover_pending_executed_block(block, span), )? { @@ -1028,6 +1174,7 @@ mod test { test_store, custody_context, spec.clone(), + None, ) .expect("should create cache"), ); diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 7b04a36faec..ed46cb263f0 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -230,6 +230,7 @@ pub enum BeaconChainError { columns_found: usize, }, FailedToReconstructBlobs(String), + ExecutionProofError(String), ProposerCacheIncorrectState { state_decision_block_root: Hash256, requested_decision_block_root: Hash256, diff --git a/beacon_node/beacon_chain/src/events.rs b/beacon_node/beacon_chain/src/events.rs index 63be944eea2..8ed187191c2 100644 --- a/beacon_node/beacon_chain/src/events.rs +++ b/beacon_node/beacon_chain/src/events.rs @@ -1,4 +1,4 @@ -pub use eth2::types::{EventKind, SseBlock, SseFinalizedCheckpoint, SseHead}; +pub use eth2::types::{EventKind, SseBlock, SseExecutionProof, SseFinalizedCheckpoint, SseHead}; use tokio::sync::broadcast; use tokio::sync::broadcast::{Receiver, Sender, error::SendError}; use tracing::trace; @@ -12,6 +12,7 @@ pub struct ServerSentEventHandler { block_tx: Sender>, blob_sidecar_tx: Sender>, data_column_sidecar_tx: Sender>, + execution_proof_tx: Sender>, finalized_tx: Sender>, head_tx: Sender>, exit_tx: Sender>, @@ -39,6 +40,7 @@ impl ServerSentEventHandler { let (block_tx, _) = broadcast::channel(capacity); let (blob_sidecar_tx, _) = broadcast::channel(capacity); let (data_column_sidecar_tx, _) = broadcast::channel(capacity); + let (execution_proof_tx, _) = broadcast::channel(capacity); let (finalized_tx, _) = broadcast::channel(capacity); let (head_tx, _) = broadcast::channel(capacity); let (exit_tx, _) = broadcast::channel(capacity); @@ -60,6 +62,7 @@ impl ServerSentEventHandler { block_tx, blob_sidecar_tx, data_column_sidecar_tx, + execution_proof_tx, finalized_tx, head_tx, exit_tx, @@ -106,6 +109,10 @@ impl ServerSentEventHandler { .data_column_sidecar_tx .send(kind) .map(|count| log_count("data_column_sidecar", count)), + EventKind::ExecutionProof(_) => self + .execution_proof_tx + .send(kind) + .map(|count| log_count("execution_proof", count)), EventKind::FinalizedCheckpoint(_) => self .finalized_tx .send(kind) @@ -188,6 +195,10 @@ impl ServerSentEventHandler { self.data_column_sidecar_tx.subscribe() } + pub fn subscribe_execution_proof(&self) -> Receiver> { + self.execution_proof_tx.subscribe() + } + pub fn subscribe_finalized(&self) -> Receiver> { self.finalized_tx.subscribe() } @@ -264,6 +275,10 @@ impl ServerSentEventHandler { self.data_column_sidecar_tx.receiver_count() > 0 } + pub fn has_execution_proof_subscribers(&self) -> bool { + self.execution_proof_tx.receiver_count() > 0 + } + pub fn has_finalized_subscribers(&self) -> bool { self.finalized_tx.receiver_count() > 0 } diff --git a/beacon_node/beacon_chain/src/execution_payload.rs 
b/beacon_node/beacon_chain/src/execution_payload.rs
index f0cab06ca3d..1ced6d4d7b6 100644
--- a/beacon_node/beacon_chain/src/execution_payload.rs
+++ b/beacon_node/beacon_chain/src/execution_payload.rs
@@ -136,6 +136,15 @@ async fn notify_new_payload(
         .ok_or(ExecutionPayloadError::NoExecutionConnection)?;
 
     let execution_block_hash = block.execution_payload()?.block_hash();
+
+    if chain.config.stateless_validation {
+        debug!(
+            execution_block_hash = ?execution_block_hash,
+            "Stateless validation enabled - marking payload as optimistic; the DA checker will check for proofs"
+        );
+        return Ok(PayloadVerificationStatus::Optimistic);
+    }
+
     let new_payload_response = execution_layer.notify_new_payload(block.try_into()?).await;
 
     match new_payload_response {
@@ -452,6 +461,16 @@ where
         .as_ref()
         .ok_or(BlockProductionError::ExecutionLayerMissing)?;
 
+    // Check for stateless validation mode
+    if chain.config.stateless_validation {
+        // TODO(zkproofs): We could return an empty payload here until we hook up mev-boost
+        debug!(
+            "ERROR: Cannot produce blocks in stateless validation mode - no execution layer attached. \
+             TODO: Use MEV-boost for block production in stateless validation mode."
+        );
+        return Err(BlockProductionError::ExecutionLayerMissing);
+    }
+
     let parent_hash = if !is_merge_transition_complete {
         let is_terminal_block_hash_set = spec.terminal_block_hash != ExecutionBlockHash::zero();
         let is_activation_epoch_reached =
diff --git a/beacon_node/beacon_chain/src/execution_proof_generation.rs b/beacon_node/beacon_chain/src/execution_proof_generation.rs
new file mode 100644
index 00000000000..db82a3ddd50
--- /dev/null
+++ b/beacon_node/beacon_chain/src/execution_proof_generation.rs
@@ -0,0 +1,300 @@
+//! Execution proof generation and verification
+//!
+//! This module handles the generation and verification of execution proofs.
+//! Currently implements dummy proof generation, but will be replaced with
+//! actual proof generation from zkVMs or other proof systems.
+
+use tracing::debug;
+use types::{
+    EthSpec, ExecutionPayload, ExecutionProof, Hash256,
+    execution_proof_subnet_id::ExecutionProofSubnetId,
+};
+
+/// Generate a proof for an execution payload
+///
+/// TODO(zkproofs): Currently generates dummy proofs. Will be replaced with actual proof generation
+/// from zkVMs or other proof systems.
+///
+/// This accepts the concrete ExecutionPayload type, which is what the EL expects
+/// and can be easily serialized for sending to external systems.
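+///
+/// Illustrative call (mirrors the tests below; the witness bytes are a stand-in for
+/// real EL output):
+///
+/// ```ignore
+/// let proof = generate_proof(block_root, &payload, b"witness", subnet_id).await;
+/// assert!(validate_proof(&proof));
+/// ```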
+/// The execution_state_witness would be obtained from the EL (e.g., via debug_executionWitness) +pub async fn generate_proof( + block_root: Hash256, + payload: &ExecutionPayload, + execution_state_witness: &[u8], + proof_id: ExecutionProofSubnetId, +) -> ExecutionProof { + let execution_block_hash = payload.block_hash(); + let block_number = payload.block_number(); + + // Simulate (some) proof computation delay + // In a real implementation, this would be the time needed for zkVM local proof generation + // or communication with external proof generation services + use rand::{Rng, rng}; + let delay_ms = rng().random_range(1000..=3000); + + debug!( + execution_block_hash = ?execution_block_hash, + subnet_id = *proof_id, + delay_ms, + "Simulating proof generation delay" + ); + + tokio::time::sleep(tokio::time::Duration::from_millis(delay_ms)).await; + + // Create dummy proof data that includes the subnet information and payload details + // In a real implementation, this would use the execution_state_witness to generate + // a cryptographic proof of the payload's validity + let dummy_data = format!( + "dummy_proof_subnet_{}_block_{:?}_number_{}_witness_len_{}", + *proof_id, + execution_block_hash, + block_number, + execution_state_witness.len() + ) + .into_bytes(); + + ExecutionProof::new(block_root, execution_block_hash, proof_id, 1, dummy_data) +} + +/// Validate a proof (placeholder implementation) +/// +/// TODO(zkproofs): Implement actual cryptographic proof validation based on version and type +pub fn validate_proof(proof: &ExecutionProof) -> bool { + // Placeholder validation - in reality this would verify cryptographic proofs + // based on both proof_id and version + match proof.version { + 1 => { + // Version 1: basic validation - non-empty proof data + !proof.proof_data.is_empty() + } + _ => { + // Unknown versions are considered invalid + false + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use types::{ + ExecutionBlockHash, ExecutionPayloadBellatrix, FixedBytesExtended, FullPayloadBellatrix, + Hash256, MainnetEthSpec, Uint256, + }; + + #[tokio::test] + async fn test_generate_proof() { + let execution_block_hash = ExecutionBlockHash::from(Hash256::random()); + let proof_id = ExecutionProofSubnetId::new(5).unwrap(); + + // Create a dummy payload for testing + let payload = FullPayloadBellatrix:: { + execution_payload: ExecutionPayloadBellatrix:: { + parent_hash: ExecutionBlockHash::zero(), + fee_recipient: Default::default(), + state_root: Hash256::zero(), + receipts_root: Hash256::zero(), + logs_bloom: Default::default(), + prev_randao: Hash256::zero(), + block_number: 12345, + gas_limit: 30_000_000, + gas_used: 0, + timestamp: 0, + extra_data: Default::default(), + base_fee_per_gas: Uint256::from(1u64), + block_hash: execution_block_hash, + transactions: Default::default(), + }, + }; + + let exec_payload = ExecutionPayload::Bellatrix(payload.execution_payload); + let dummy_witness = b"test_witness_data"; + let proof = generate_proof(Hash256::random(), &exec_payload, dummy_witness, proof_id).await; + + assert_eq!(proof.block_hash, execution_block_hash); + assert_eq!(proof.subnet_id, proof_id); + assert_eq!(proof.version, 1); + assert!(!proof.proof_data.is_empty()); + assert!(validate_proof(&proof)); + + // Verify the proof data contains expected information + let proof_data_str = String::from_utf8_lossy(&proof.proof_data); + assert!(proof_data_str.contains("subnet_5")); + assert!(proof_data_str.contains("number_12345")); + 
assert!(proof_data_str.contains("witness_len_17")); // 17 is the length of "test_witness_data" + } + + #[test] + fn test_validate_proof() { + let hash = ExecutionBlockHash::from(Hash256::random()); + + // Test version 1 proof (supported) + let v1_proof = ExecutionProof::new( + Hash256::random(), + hash, + ExecutionProofSubnetId::new(0).unwrap(), + 1, + vec![1, 2, 3], + ); + assert!(validate_proof(&v1_proof)); + + // Test unsupported version + let v2_proof = ExecutionProof::new( + Hash256::random(), + hash, + ExecutionProofSubnetId::new(0).unwrap(), + 2, + vec![7, 8, 9], + ); + assert!(!validate_proof(&v2_proof)); // Should fail validation for unknown version + + // Test empty data with version 1 (should be invalid) + let empty_v1 = ExecutionProof::new( + Hash256::random(), + hash, + ExecutionProofSubnetId::new(0).unwrap(), + 1, + vec![], + ); + assert!(!validate_proof(&empty_v1)); + } + + #[tokio::test] + async fn test_generate_proof_different_subnets() { + let execution_block_hash = ExecutionBlockHash::from(Hash256::random()); + + // Create a dummy payload for testing + let payload = FullPayloadBellatrix:: { + execution_payload: ExecutionPayloadBellatrix:: { + parent_hash: ExecutionBlockHash::zero(), + fee_recipient: Default::default(), + state_root: Hash256::zero(), + receipts_root: Hash256::zero(), + logs_bloom: Default::default(), + prev_randao: Hash256::zero(), + block_number: 42, + gas_limit: 0, + gas_used: 0, + timestamp: 0, + extra_data: Default::default(), + base_fee_per_gas: Uint256::from(0u64), + block_hash: execution_block_hash, + transactions: Default::default(), + }, + }; + + let exec_payload = ExecutionPayload::Bellatrix(payload.execution_payload); + let dummy_witness = b"test_witness_data"; + + let proof_0 = generate_proof( + Hash256::random(), + &exec_payload, + dummy_witness, + ExecutionProofSubnetId::new(0).unwrap(), + ) + .await; + let proof_1 = generate_proof( + Hash256::random(), + &exec_payload, + dummy_witness, + ExecutionProofSubnetId::new(1).unwrap(), + ) + .await; + let proof_2 = generate_proof( + Hash256::random(), + &exec_payload, + dummy_witness, + ExecutionProofSubnetId::new(2).unwrap(), + ) + .await; + + // All proofs should be for the same block hash + assert_eq!(proof_0.block_hash, execution_block_hash); + assert_eq!(proof_1.block_hash, execution_block_hash); + assert_eq!(proof_2.block_hash, execution_block_hash); + + // But should have different proof IDs and data + assert_eq!(*proof_0.subnet_id, 0); + assert_eq!(*proof_1.subnet_id, 1); + assert_eq!(*proof_2.subnet_id, 2); + + // Proof data should be different for different subnets + assert_ne!(proof_0.proof_data, proof_1.proof_data); + assert_ne!(proof_1.proof_data, proof_2.proof_data); + + let data_0 = String::from_utf8_lossy(&proof_0.proof_data); + let data_1 = String::from_utf8_lossy(&proof_1.proof_data); + let data_2 = String::from_utf8_lossy(&proof_2.proof_data); + + assert!(data_0.contains("subnet_0")); + assert!(data_1.contains("subnet_1")); + assert!(data_2.contains("subnet_2")); + } + + #[tokio::test] + async fn test_generate_proof_deterministic() { + // Test that proof generation is deterministic - same input always produces same output + let execution_block_hash = ExecutionBlockHash::from(Hash256::from_low_u64_be(12345)); + let proof_id = ExecutionProofSubnetId::new(3).unwrap(); + + // Create a specific payload with fixed values + let payload = FullPayloadBellatrix:: { + execution_payload: ExecutionPayloadBellatrix:: { + parent_hash: ExecutionBlockHash::from(Hash256::from_low_u64_be(111)), + 
fee_recipient: Default::default(), + state_root: Hash256::from_low_u64_be(222), + receipts_root: Hash256::from_low_u64_be(333), + logs_bloom: Default::default(), + prev_randao: Hash256::from_low_u64_be(444), + block_number: 555, + gas_limit: 30_000_000, + gas_used: 15_000_000, + timestamp: 1234567890, + extra_data: b"test_extra_data".to_vec().into(), + base_fee_per_gas: Uint256::from(7u64), + block_hash: execution_block_hash, + transactions: vec![b"tx1".to_vec().into(), b"tx2".to_vec().into()].into(), + }, + }; + + let exec_payload = ExecutionPayload::Bellatrix(payload.execution_payload); + let witness_data = b"deterministic_witness_data"; + + // Generate proof multiple times with same input + let block_root = Hash256::random(); + let proof1 = generate_proof(block_root, &exec_payload, witness_data, proof_id).await; + let proof2 = generate_proof(block_root, &exec_payload, witness_data, proof_id).await; + let proof3 = generate_proof(block_root, &exec_payload, witness_data, proof_id).await; + + // All proofs should be identical + assert_eq!(proof1.block_hash, proof2.block_hash); + assert_eq!(proof1.block_hash, proof3.block_hash); + + assert_eq!(proof1.subnet_id, proof2.subnet_id); + assert_eq!(proof1.subnet_id, proof3.subnet_id); + + assert_eq!(proof1.version, proof2.version); + assert_eq!(proof1.version, proof3.version); + + // Most importantly, proof data should be identical + assert_eq!(proof1.proof_data, proof2.proof_data); + assert_eq!(proof1.proof_data, proof3.proof_data); + + // Verify the content is as expected + let proof_str = String::from_utf8_lossy(&proof1.proof_data); + assert!(proof_str.contains("subnet_3")); + assert!(proof_str.contains("number_555")); + assert!(proof_str.contains("witness_len_26")); + + // Now test that different inputs produce different proofs + let different_witness = b"different_witness_data"; + let proof_different = + generate_proof(block_root, &exec_payload, different_witness, proof_id).await; + + // Same block hash and subnet, but different proof data + assert_eq!(proof_different.block_hash, proof1.block_hash); + assert_eq!(proof_different.subnet_id, proof1.subnet_id); + assert_ne!(proof_different.proof_data, proof1.proof_data); + } +} diff --git a/beacon_node/beacon_chain/src/execution_proof_network.rs b/beacon_node/beacon_chain/src/execution_proof_network.rs new file mode 100644 index 00000000000..d74f43a9d1a --- /dev/null +++ b/beacon_node/beacon_chain/src/execution_proof_network.rs @@ -0,0 +1,127 @@ +use crate::{BeaconChain, BeaconChainTypes}; +use std::sync::Arc; +use tracing::{debug, info, warn}; +use types::{BeaconBlockRef, ExecutionPayload, ExecutionProofSubnetId, FullPayload, Hash256}; + +/// Spawn a background task to generate and store execution proofs with publishing via callback +/// This provides a clean interface for both HTTP API (publish_blocks) and gossip processing (process_block) +pub fn spawn_proof_generation_task_with_publishing( + chain: &Arc>, + block: BeaconBlockRef<'_, T::EthSpec, FullPayload>, + block_root: Hash256, + publish_fn: F, + task_name: &'static str, +) where + T: BeaconChainTypes, + F: Fn(ExecutionProofSubnetId, types::ExecutionProof) + Send + 'static, +{ + let chain_clone = chain.clone(); + + // Extract the concrete ExecutionPayload from the BeaconBlock + let payload: ExecutionPayload = match block.body().execution_payload() { + Ok(payload) => payload.into(), + Err(_) => { + warn!("Attempting proof generation with a pre-merge block",); + return; + } + }; + + // Spawn the proof generation task in the background + 
chain.task_executor.spawn(
+        async move {
+            let execution_block_hash = payload.block_hash();
+
+            info!(
+                execution_block_hash = ?execution_block_hash,
+                block_root = ?block_root,
+                "Starting execution proof generation"
+            );
+
+            // Simulate execution witness data (in production, this would come from the EL)
+            let witness =
+                format!("dummy_witness_for_block_{:?}", execution_block_hash).into_bytes();
+
+            // Get configured subnets for proof generation
+            let proof_subnets = get_configured_proof_subnets(&chain_clone);
+
+            debug!(
+                execution_block_hash = ?execution_block_hash,
+                subnet_count = proof_subnets.len(),
+                subnets = ?proof_subnets,
+                "Generating proofs for configured subnets"
+            );
+
+            // Generate and store a proof for each subnet
+            for subnet_id in proof_subnets {
+                let proof_id = match ExecutionProofSubnetId::new(subnet_id) {
+                    Ok(id) => id,
+                    Err(e) => {
+                        debug!(subnet_id, error = %e, "Invalid subnet ID, skipping");
+                        continue;
+                    }
+                };
+
+                // Generate proof using the execution_proof_generation module
+                let proof = crate::execution_proof_generation::generate_proof(
+                    block_root, &payload, &witness, proof_id,
+                )
+                .await;
+
+                let verified_proof = match crate::execution_proof_verification::GossipVerifiedExecutionProof::<
+                    T,
+                >::new(Arc::new(proof.clone()), proof_id, &chain_clone)
+                {
+                    Ok(verified) => verified,
+                    Err(e) => {
+                        warn!(
+                            execution_block_hash = ?execution_block_hash,
+                            subnet_id,
+                            error = ?e,
+                            "Failed to verify locally generated execution proof"
+                        );
+                        continue; // Skip this proof and continue with next subnet
+                    }
+                };
+
+                // Store in local DA checker
+                match chain_clone
+                    .data_availability_checker
+                    .put_gossip_verified_execution_proofs(block_root, std::iter::once(verified_proof))
+                {
+                    Ok(_) => {
+                        debug!(
+                            execution_block_hash = ?execution_block_hash,
+                            subnet_id,
+                            "Generated and stored execution proof locally"
+                        );
+                        // Let the caller handle publishing via their specific network interface
+                        publish_fn(proof_id, proof);
+                    }
+                    Err(e) => {
+                        warn!(
+                            execution_block_hash = ?execution_block_hash,
+                            subnet_id,
+                            error = ?e,
+                            "Failed to store generated execution proof"
+                        );
+                    }
+                }
+            }
+        },
+        task_name,
+    );
+}
+
+/// Get configured proof subnets for this node
+pub fn get_configured_proof_subnets<T: BeaconChainTypes>(chain: &Arc<BeaconChain<T>>) -> Vec<u64> {
+    // TODO(zkproofs): For now, the node will generate proofs for all available subnets. In the
+    // future, nodes should be able to restrict this to the proofs they can actually generate,
+    // mainly for altruistic nodes that want to seed the network.
+    //
+    // TODO(zkproofs): Check if there are any assumptions on the proof being deterministic, i.e.
+    // whether it's okay that two nodes generate two valid proofs for the same payload.
+    if chain.config.generate_execution_proofs {
+        (0..types::execution_proof_subnet_id::MAX_EXECUTION_PROOF_SUBNETS).collect()
+    } else {
+        vec![]
+    }
+}
diff --git a/beacon_node/beacon_chain/src/execution_proof_verification.rs b/beacon_node/beacon_chain/src/execution_proof_verification.rs
new file mode 100644
index 00000000000..9ed980a1381
--- /dev/null
+++ b/beacon_node/beacon_chain/src/execution_proof_verification.rs
@@ -0,0 +1,236 @@
+//! Verification of execution proofs received via gossip.
+//!
+//! This module provides gossip verification for execution proofs, similar to how
+//! blob_verification.rs handles blob sidecars.
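+//!
+//! Rough shape of the flow (a sketch, not the exact call sites):
+//!
+//! ```ignore
+//! let verified = GossipVerifiedExecutionProof::new(proof, subnet_id, &chain)?;
+//! chain
+//!     .check_gossip_execution_proof_availability_and_import(block_root, [verified])
+//!     .await?;
+//! ```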
+ +use derivative::Derivative; +use std::marker::PhantomData; +use std::sync::Arc; +use std::time::Duration; + +use crate::BeaconChainError; +use crate::beacon_chain::{BeaconChain, BeaconChainTypes}; +use crate::execution_proof_generation; +use crate::observed_data_sidecars::{ObservationStrategy, Observe}; +use slot_clock::SlotClock; +use strum::IntoStaticStr; +use types::execution_proof_subnet_id::ExecutionProofSubnetId; +use types::{ExecutionProof, Hash256, Slot}; + +/// An error occurred while validating a gossip execution proof. +#[derive(Debug, IntoStaticStr)] +pub enum GossipExecutionProofError { + /// The execution proof is from a slot that is later than the current slot. + FutureSlot { + message_slot: Slot, + latest_permissible_slot: Slot, + }, + /// The execution proof is from a slot that is prior to the earliest permissible slot. + PastSlot { + message_slot: Slot, + earliest_permissible_slot: Slot, + }, + /// The subnet ID does not match the proof's subnet ID. + InvalidSubnetId { expected: u64, received: u64 }, + /// The execution proof failed cryptographic verification. + InvalidProof { reason: String }, + /// The execution proof is structurally invalid. + InvalidStructure { reason: String }, + /// Some other error occurred. + BeaconChainError(Box), +} + +impl From for GossipExecutionProofError { + fn from(e: BeaconChainError) -> Self { + Self::BeaconChainError(e.into()) + } +} + +/// A wrapper around an `ExecutionProof` that has been verified for propagation on the gossip network. +#[derive(Derivative)] +#[derivative(Clone(bound = "T: Clone"))] +pub struct GossipVerifiedExecutionProof { + block_root: Hash256, + proof: VerifiedExecutionProof, + _phantom: PhantomData<(T, O)>, +} + +/// A wrapper around an `ExecutionProof` that has been cryptographically verified. +#[derive(Clone)] +pub struct VerifiedExecutionProof { + proof: ExecutionProof, + seen_timestamp: Duration, +} + +impl VerifiedExecutionProof { + /// Create a new verified execution proof with a seen timestamp. + pub fn new(proof: ExecutionProof, seen_timestamp: Duration) -> Self { + Self { + proof, + seen_timestamp, + } + } + + /// Get the inner execution proof. + pub fn as_proof(&self) -> &ExecutionProof { + &self.proof + } + + /// Get the seen timestamp. + pub fn seen_timestamp(&self) -> Duration { + self.seen_timestamp + } + + /// Convert into the inner execution proof. + pub fn into_inner(self) -> ExecutionProof { + self.proof + } +} + +impl GossipVerifiedExecutionProof { + /// Create a new `GossipVerifiedExecutionProof` after performing gossip verification. + pub fn new( + proof: Arc, + subnet_id: ExecutionProofSubnetId, + chain: &BeaconChain, + ) -> Result { + let seen_timestamp = chain + .slot_clock + .now_duration() + .ok_or(BeaconChainError::UnableToReadSlot)?; + + validate_execution_proof_for_gossip::(proof.clone(), subnet_id, chain)?; + + // Perform cryptographic verification + if !execution_proof_generation::validate_proof(&proof) { + return Err(GossipExecutionProofError::InvalidProof { + reason: "Cryptographic verification failed".to_string(), + }); + } + + Ok(Self { + block_root: proof.block_root, + proof: VerifiedExecutionProof::new((*proof).clone(), seen_timestamp), + _phantom: PhantomData, + }) + } + + /// Construct a `GossipVerifiedExecutionProof` that is assumed to be valid. + /// + /// This should ONLY be used for testing. 
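+    /// For example, building fixtures in unit tests without a running chain:
+    /// `GossipVerifiedExecutionProof::__assumed_valid(Arc::new(proof))`.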
+ pub fn __assumed_valid(proof: Arc) -> Self { + Self { + block_root: proof.block_root, + proof: VerifiedExecutionProof { + proof: (*proof).clone(), + seen_timestamp: Duration::from_secs(0), + }, + _phantom: PhantomData, + } + } + + /// Get the block root of the beacon block this proof is for. + pub fn block_root(&self) -> Hash256 { + self.block_root + } + + /// Get the execution proof. + pub fn as_proof(&self) -> &ExecutionProof { + self.proof.as_proof() + } + + /// Get the subnet ID. + pub fn subnet_id(&self) -> ExecutionProofSubnetId { + self.proof.proof.subnet_id + } + + /// Get the slot of the proof (derived from block). + pub fn slot(&self) -> Slot { + // TODO: This would need to be derived from the block the proof references + // For now, return a placeholder + Slot::new(0) + } + + /// Convert into the inner verified execution proof. + pub fn into_inner(self) -> VerifiedExecutionProof { + self.proof + } +} + +/// Validate an execution proof for gossip according to the rules defined in the consensus specs. +fn validate_execution_proof_for_gossip( + proof: Arc, + subnet_id: ExecutionProofSubnetId, + _chain: &BeaconChain, +) -> Result<(), GossipExecutionProofError> { + // Check subnet ID matches + if proof.subnet_id != subnet_id { + return Err(GossipExecutionProofError::InvalidSubnetId { + expected: *subnet_id, + received: *proof.subnet_id, + }); + } + + // Check structural validity + if !proof.is_structurally_valid() { + return Err(GossipExecutionProofError::InvalidStructure { + reason: "Proof is structurally invalid".to_string(), + }); + } + + // TODO: Add timing validation based on slot + if O::observe() { + // TODO: Add duplicate proof detection + } + // TODO: Add block existence validation + + Ok(()) +} + +/// List of `VerifiedExecutionProof` that can be converted to/from a list of `ExecutionProof`. +pub struct VerifiedExecutionProofList { + verified_proofs: Vec, +} + +impl VerifiedExecutionProofList { + /// Create a new list by verifying a collection of execution proofs. + pub fn new>( + proofs: I, + seen_timestamp: Duration, + ) -> Result { + let mut verified_proofs = Vec::new(); + + for proof in proofs { + // Perform cryptographic verification for each proof + if execution_proof_generation::validate_proof(&proof) { + verified_proofs.push(VerifiedExecutionProof::new(proof, seen_timestamp)); + } else { + return Err(format!( + "Invalid execution proof for subnet {}", + *proof.subnet_id + )); + } + } + + Ok(Self { verified_proofs }) + } + + /// Convert into a vector of verified execution proofs. + pub fn into_vec(self) -> Vec { + self.verified_proofs + } + + /// Get an iterator over the verified execution proofs. 
+ pub fn iter(&self) -> impl Iterator { + self.verified_proofs.iter() + } +} + +impl IntoIterator for VerifiedExecutionProofList { + type Item = VerifiedExecutionProof; + type IntoIter = std::vec::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.verified_proofs.into_iter() + } +} diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 9d8c3dba38f..b08a3c75b61 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -23,6 +23,9 @@ mod early_attester_cache; mod errors; pub mod events; pub mod execution_payload; +pub mod execution_proof_generation; +pub mod execution_proof_network; +pub mod execution_proof_verification; pub mod fetch_blobs; pub mod fork_choice_signal; pub mod fork_revert; diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 449b5dd0434..ddb0792e1d5 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -3,7 +3,7 @@ use beacon_chain::attestation_verification::Error as AttnError; use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::builder::BeaconChainBuilder; -use beacon_chain::data_availability_checker::AvailableBlock; +use beacon_chain::data_availability_checker::{AvailableBlock, ImportableProofData}; use beacon_chain::schema_change::migrate_schema; use beacon_chain::test_utils::SyncCommitteeStrategy; use beacon_chain::test_utils::{ @@ -2882,6 +2882,7 @@ async fn weak_subjectivity_sync_test( block_root, Arc::new(corrupt_block), data, + ImportableProofData::NoneRequired, Arc::new(spec), ) }; diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index 28ed0cca913..87a13c31cfc 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -121,6 +121,7 @@ pub struct BeaconProcessorQueueLengths { gossip_block_queue: usize, gossip_blob_queue: usize, gossip_data_column_queue: usize, + gossip_execution_proof_queue: usize, delayed_block_queue: usize, status_queue: usize, bbrange_queue: usize, @@ -187,6 +188,7 @@ impl BeaconProcessorQueueLengths { gossip_block_queue: 1024, gossip_blob_queue: 1024, gossip_data_column_queue: 1024, + gossip_execution_proof_queue: 1024, delayed_block_queue: 1024, status_queue: 1024, bbrange_queue: 1024, @@ -579,6 +581,7 @@ pub enum Work { GossipBlock(AsyncFn), GossipBlobSidecar(AsyncFn), GossipDataColumnSidecar(AsyncFn), + GossipExecutionProof(AsyncFn), DelayedImportBlock { beacon_block_slot: Slot, beacon_block_root: Hash256, @@ -641,6 +644,7 @@ pub enum WorkType { GossipBlock, GossipBlobSidecar, GossipDataColumnSidecar, + GossipExecutionProof, DelayedImportBlock, GossipVoluntaryExit, GossipProposerSlashing, @@ -688,6 +692,7 @@ impl Work { Work::GossipBlock(_) => WorkType::GossipBlock, Work::GossipBlobSidecar(_) => WorkType::GossipBlobSidecar, Work::GossipDataColumnSidecar(_) => WorkType::GossipDataColumnSidecar, + Work::GossipExecutionProof(_) => WorkType::GossipExecutionProof, Work::DelayedImportBlock { .. 
} => WorkType::DelayedImportBlock, Work::GossipVoluntaryExit(_) => WorkType::GossipVoluntaryExit, Work::GossipProposerSlashing(_) => WorkType::GossipProposerSlashing, @@ -873,6 +878,8 @@ impl BeaconProcessor { let mut gossip_block_queue = FifoQueue::new(queue_lengths.gossip_block_queue); let mut gossip_blob_queue = FifoQueue::new(queue_lengths.gossip_blob_queue); let mut gossip_data_column_queue = FifoQueue::new(queue_lengths.gossip_data_column_queue); + let mut gossip_execution_proof_queue = + FifoQueue::new(queue_lengths.gossip_execution_proof_queue); let mut delayed_block_queue = FifoQueue::new(queue_lengths.delayed_block_queue); let mut status_queue = FifoQueue::new(queue_lengths.status_queue); @@ -1325,6 +1332,9 @@ impl BeaconProcessor { Work::GossipDataColumnSidecar { .. } => { gossip_data_column_queue.push(work, work_id) } + Work::GossipExecutionProof { .. } => { + gossip_execution_proof_queue.push(work, work_id) + } Work::DelayedImportBlock { .. } => { delayed_block_queue.push(work, work_id) } @@ -1416,6 +1426,7 @@ impl BeaconProcessor { WorkType::GossipBlock => gossip_block_queue.len(), WorkType::GossipBlobSidecar => gossip_blob_queue.len(), WorkType::GossipDataColumnSidecar => gossip_data_column_queue.len(), + WorkType::GossipExecutionProof => gossip_execution_proof_queue.len(), WorkType::DelayedImportBlock => delayed_block_queue.len(), WorkType::GossipVoluntaryExit => gossip_voluntary_exit_queue.len(), WorkType::GossipProposerSlashing => gossip_proposer_slashing_queue.len(), @@ -1591,7 +1602,8 @@ impl BeaconProcessor { Work::IgnoredRpcBlock { process_fn } => task_spawner.spawn_blocking(process_fn), Work::GossipBlock(work) | Work::GossipBlobSidecar(work) - | Work::GossipDataColumnSidecar(work) => task_spawner.spawn_async(async move { + | Work::GossipDataColumnSidecar(work) + | Work::GossipExecutionProof(work) => task_spawner.spawn_async(async move { work.await; }), Work::BlobsByRangeRequest(process_fn) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 7f6c97a0f85..e8da6ef161e 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -4686,6 +4686,9 @@ pub fn serve( api_types::EventTopic::DataColumnSidecar => { event_handler.subscribe_data_column_sidecar() } + api_types::EventTopic::ExecutionProof => { + event_handler.subscribe_execution_proof() + } api_types::EventTopic::Attestation => { event_handler.subscribe_attestation() } diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index 05a4a4b7a4a..bdb410ad4a6 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -4,6 +4,7 @@ use std::future::Future; use beacon_chain::blob_verification::{GossipBlobError, GossipVerifiedBlob}; use beacon_chain::block_verification_types::{AsBlock, RpcBlock}; use beacon_chain::data_column_verification::GossipVerifiedDataColumn; +use beacon_chain::execution_proof_network; use beacon_chain::validator_monitor::{get_block_delay_ms, timestamp_now}; use beacon_chain::{ AvailabilityProcessingStatus, BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, @@ -110,6 +111,36 @@ pub async fn publish_block>( debug!(slot = %block.slot(), "Signed block received in HTTP API"); + // Generate execution proofs for locally built blocks + if is_locally_built_block && chain.config.generate_execution_proofs { + info!( + execution_block_hash = ?block.message().body().execution_payload().ok().map(|p| p.block_hash()), + 
"spawn_proof_generation_task_with_publishing called from publish_block" + ); + + let network_tx_clone = network_tx.clone(); + let publish_fn = move |proof_id: types::ExecutionProofSubnetId, + proof: types::ExecutionProof| { + let pubsub_message = + PubsubMessage::ExecutionProofMessage(Box::new((proof_id, Arc::new(proof)))); + if let Err(e) = crate::publish_pubsub_message(&network_tx_clone, pubsub_message) { + warn!( + subnet_id = *proof_id, + error = ?e, + "Failed to publish execution proof to gossip network" + ); + } + }; + + execution_proof_network::spawn_proof_generation_task_with_publishing( + &chain, + block.message(), + block.canonical_root(), + publish_fn, + "http_api_execution_proof_generation", + ); + } + /* actually publish a block */ let publish_block_p2p = move |block: Arc>, sender, diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 89c6c58d4f6..d091126d1f7 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -143,6 +143,9 @@ pub struct Config { /// Flag for advertising a fake CGC to peers for testing ONLY. pub advertise_false_custody_group_count: Option, + + /// Whether stateless validation is enabled. + pub stateless_validation: bool, } impl Config { @@ -368,6 +371,7 @@ impl Default for Config { inbound_rate_limiter_config: None, idontwant_message_size_threshold: DEFAULT_IDONTWANT_MESSAGE_SIZE_THRESHOLD, advertise_false_custody_group_count: None, + stateless_validation: false, } } } diff --git a/beacon_node/lighthouse_network/src/discovery/enr.rs b/beacon_node/lighthouse_network/src/discovery/enr.rs index bb9ff299c5d..78821c2baf3 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr.rs @@ -29,6 +29,8 @@ pub const ATTESTATION_BITFIELD_ENR_KEY: &str = "attnets"; pub const SYNC_COMMITTEE_BITFIELD_ENR_KEY: &str = "syncnets"; /// The ENR field specifying the peerdas custody group count. pub const PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY: &str = "cgc"; +/// The ENR field specifying the execution proof subnet bitfield. +pub const EXECUTION_PROOF_SUBNETS_ENR_KEY: &str = "exproofs"; /// Extension trait for ENR's within Eth2. pub trait Eth2Enr { @@ -43,6 +45,9 @@ pub trait Eth2Enr { /// The peerdas custody group count associated with the ENR. fn custody_group_count(&self, spec: &ChainSpec) -> Result; + /// The execution proof subnets bitfield associated with the ENR. + fn execution_proof_subnets(&self) -> Result; + /// The next fork digest associated with the ENR. fn next_fork_digest(&self) -> Result<[u8; 4], &'static str>; @@ -85,6 +90,12 @@ impl Eth2Enr for Enr { } } + fn execution_proof_subnets(&self) -> Result { + self.get_decodable::(EXECUTION_PROOF_SUBNETS_ENR_KEY) + .ok_or("ENR execution proof subnets non-existent")? + .map_err(|_| "Could not decode the ENR execution proof subnets") + } + fn next_fork_digest(&self) -> Result<[u8; 4], &'static str> { self.get_decodable::<[u8; 4]>(NEXT_FORK_DIGEST_ENR_KEY) .ok_or("ENR next fork digest non-existent")? @@ -278,6 +289,19 @@ pub fn build_enr( &bitfield.as_ssz_bytes().into(), ); + // set the "exproofs" field on our ENR + // Default to 0 (no subnets) + let execution_proof_subnets: u8 = if config.stateless_validation { + // If stateless validation is enabled, advertise all subnets + // as a default. 
+ // TODO(zkproofs): In the future, we can allow nodes to choose + // certain subnets based on their k-out-of-n appetite + 0xFF + } else { + 0 + }; + builder.add_value(EXECUTION_PROOF_SUBNETS_ENR_KEY, &execution_proof_subnets); + // only set `cgc` and `nfd` if PeerDAS fork (Fulu) epoch has been scheduled if spec.is_peer_das_scheduled() { let custody_group_count = if let Some(cgc) = custody_group_count { @@ -317,11 +341,12 @@ fn compare_enr(local_enr: &Enr, disk_enr: &Enr) -> bool { && (local_enr.udp4().is_none() || local_enr.udp4() == disk_enr.udp4()) && (local_enr.udp6().is_none() || local_enr.udp6() == disk_enr.udp6()) // we need the ATTESTATION_BITFIELD_ENR_KEY and SYNC_COMMITTEE_BITFIELD_ENR_KEY and - // PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY key to match, otherwise we use a new ENR. This will - // likely only be true for non-validating nodes. + // PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY and EXECUTION_PROOF_SUBNETS_ENR_KEY keys to match, + // otherwise we use a new ENR. This will likely only be true for non-validating nodes. && local_enr.get_decodable::(ATTESTATION_BITFIELD_ENR_KEY) == disk_enr.get_decodable(ATTESTATION_BITFIELD_ENR_KEY) && local_enr.get_decodable::(SYNC_COMMITTEE_BITFIELD_ENR_KEY) == disk_enr.get_decodable(SYNC_COMMITTEE_BITFIELD_ENR_KEY) && local_enr.get_decodable::(PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY) == disk_enr.get_decodable(PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY) + && local_enr.get_decodable::(EXECUTION_PROOF_SUBNETS_ENR_KEY) == disk_enr.get_decodable(EXECUTION_PROOF_SUBNETS_ENR_KEY) } /// Loads enr from the given directory diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 49de62546df..e961f634f35 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -560,6 +560,8 @@ impl Discovery { } // Data column subnets are computed from node ID. No subnet bitfield in the ENR. 
Subnet::DataColumn(_) => return Ok(()),
+            // The `exproofs` ENR field is static (set at startup), so there is no bitfield to update here.
+            Subnet::ExecutionProof(_) => return Ok(()),
         }

         // replace the global version
@@ -904,6 +906,7 @@ impl Discovery {
                     Subnet::Attestation(_) => "attestation",
                     Subnet::SyncCommittee(_) => "sync_committee",
                     Subnet::DataColumn(_) => "data_column",
+                    Subnet::ExecutionProof(_) => "execution_proof",
                 };

                 if let Some(v) = metrics::get_int_counter(
diff --git a/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs b/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs
index 6e841c25a50..4f539f2517b 100644
--- a/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs
+++ b/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs
@@ -41,6 +41,7 @@ where
                     false
                 }
             }
+            Subnet::ExecutionProof(_) => false, // Not used for peer discovery predicates
         });

         if !predicate {
diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs
index ad16bb0421c..92b5d0d8482 100644
--- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs
+++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs
@@ -15,7 +15,7 @@ use std::{
     time::{Duration, Instant},
 };
 use tracing::{debug, error, trace, warn};
-use types::{DataColumnSubnetId, EthSpec, SubnetId, SyncSubnetId};
+use types::{DataColumnSubnetId, EthSpec, ExecutionProofSubnetId, SubnetId, SyncSubnetId};

 pub use libp2p::core::Multiaddr;
 pub use libp2p::identity::Keypair;
@@ -43,6 +43,8 @@ struct PeerSubnetInfo {
     attestation_subnets: HashSet<SubnetId>,
     sync_committees: HashSet<SyncSubnetId>,
     custody_subnets: HashSet<DataColumnSubnetId>,
+    // TODO(zkproofs): handle this properly
+    execution_proof_subnets: HashSet<ExecutionProofSubnetId>,
 }

 pub mod config;
@@ -1067,6 +1069,7 @@ impl PeerManager {
             attestation_subnets: HashSet::new(),
             sync_committees: HashSet::new(),
             custody_subnets: HashSet::new(),
+            execution_proof_subnets: HashSet::new(),
         };

         // Populate subnet information from long-lived subnets
@@ -1081,6 +1084,9 @@ impl PeerManager {
                 Subnet::DataColumn(id) => {
                     peer_info.custody_subnets.insert(id);
                 }
+                Subnet::ExecutionProof(id) => {
+                    peer_info.execution_proof_subnets.insert(id);
+                }
             }
         }
diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs
index c289cb9a69c..af6ab5ee5b4 100644
--- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs
+++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs
@@ -105,6 +105,10 @@ impl PeerInfo {
                 Subnet::DataColumn(subnet_id) => {
                     return self.is_assigned_to_custody_subnet(subnet_id);
                 }
+                Subnet::ExecutionProof(_) => {
+                    // Execution proof subnets don't use metadata bitfields
+                    return false;
+                }
             }
         }
         false
diff --git a/beacon_node/lighthouse_network/src/service/gossip_cache.rs b/beacon_node/lighthouse_network/src/service/gossip_cache.rs
index 120b9e6c245..cb5dc3bb3ed 100644
--- a/beacon_node/lighthouse_network/src/service/gossip_cache.rs
+++ b/beacon_node/lighthouse_network/src/service/gossip_cache.rs
@@ -211,6 +211,8 @@ impl GossipCache {
             GossipKind::BlsToExecutionChange => self.bls_to_execution_change,
             GossipKind::LightClientFinalityUpdate => self.light_client_finality_update,
             GossipKind::LightClientOptimisticUpdate => self.light_client_optimistic_update,
+            // TODO(zkproofs): maybe configure this better; proofs can be quite large
+            GossipKind::ExecutionProof(_) => self.data_column_sidecar, // Use same timeout as data columns
         };
         let Some(expire_timeout) = expire_timeout else {
             return;
         };
diff
--git a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs index a0026837e37..dfc60b9bd44 100644 --- a/beacon_node/lighthouse_network/src/service/utils.rs +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -17,7 +17,8 @@ use std::sync::Arc; use std::time::Duration; use tracing::{debug, warn}; use types::{ - ChainSpec, DataColumnSubnetId, EnrForkId, EthSpec, ForkContext, SubnetId, SyncSubnetId, + ChainSpec, DataColumnSubnetId, EnrForkId, EthSpec, ExecutionProofSubnetId, ForkContext, + SubnetId, SyncSubnetId, execution_proof_subnet_id::MAX_EXECUTION_PROOF_SUBNETS, }; pub const NETWORK_KEY_FILENAME: &str = "key"; @@ -288,6 +289,13 @@ pub(crate) fn create_whitelist_filter( for id in 0..spec.data_column_sidecar_subnet_count { add(DataColumnSidecar(DataColumnSubnetId::new(id))); } + // Add execution proof subnets + for id in 0..MAX_EXECUTION_PROOF_SUBNETS { + add(ExecutionProof( + ExecutionProofSubnetId::new(id) + .expect("id is less than MAX_EXECUTION_PROOF_SUBNETS"), + )); + } } gossipsub::WhitelistSubscriptionFilter(possible_hashes) } diff --git a/beacon_node/lighthouse_network/src/types/globals.rs b/beacon_node/lighthouse_network/src/types/globals.rs index b8c34f8392b..ca108b8c3b0 100644 --- a/beacon_node/lighthouse_network/src/types/globals.rs +++ b/beacon_node/lighthouse_network/src/types/globals.rs @@ -223,6 +223,7 @@ impl NetworkGlobals { subscribe_all_subnets: self.config.subscribe_all_subnets, subscribe_all_data_column_subnets: self.config.subscribe_all_data_column_subnets, sampling_subnets: self.sampling_subnets.read().clone(), + stateless_validation: self.config.stateless_validation, } } diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index 72f2873def9..cd9342c66dd 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -8,14 +8,14 @@ use std::io::{Error, ErrorKind}; use std::sync::Arc; use types::{ AttesterSlashing, AttesterSlashingBase, AttesterSlashingElectra, BlobSidecar, - DataColumnSidecar, DataColumnSubnetId, EthSpec, ForkContext, ForkName, - LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing, - SignedAggregateAndProof, SignedAggregateAndProofBase, SignedAggregateAndProofElectra, - SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockBellatrix, - SignedBeaconBlockCapella, SignedBeaconBlockDeneb, SignedBeaconBlockElectra, - SignedBeaconBlockFulu, SignedBeaconBlockGloas, SignedBlsToExecutionChange, - SignedContributionAndProof, SignedVoluntaryExit, SingleAttestation, SubnetId, - SyncCommitteeMessage, SyncSubnetId, + DataColumnSidecar, DataColumnSubnetId, EthSpec, ExecutionProof, ExecutionProofSubnetId, + ForkContext, ForkName, LightClientFinalityUpdate, LightClientOptimisticUpdate, + ProposerSlashing, SignedAggregateAndProof, SignedAggregateAndProofBase, + SignedAggregateAndProofElectra, SignedBeaconBlock, SignedBeaconBlockAltair, + SignedBeaconBlockBase, SignedBeaconBlockBellatrix, SignedBeaconBlockCapella, + SignedBeaconBlockDeneb, SignedBeaconBlockElectra, SignedBeaconBlockFulu, + SignedBeaconBlockGloas, SignedBlsToExecutionChange, SignedContributionAndProof, + SignedVoluntaryExit, SingleAttestation, SubnetId, SyncCommitteeMessage, SyncSubnetId, }; #[derive(Debug, Clone, PartialEq)] @@ -26,6 +26,8 @@ pub enum PubsubMessage { BlobSidecar(Box<(u64, Arc>)>), /// Gossipsub message providing notification of a [`DataColumnSidecar`] 
along with the subnet id where it was received. DataColumnSidecar(Box<(DataColumnSubnetId, Arc>)>), + /// Gossipsub message providing notification of an [`ExecutionProof`] along with the subnet id where it was received. + ExecutionProofMessage(Box<(ExecutionProofSubnetId, Arc)>), /// Gossipsub message providing notification of a Aggregate attestation and associated proof. AggregateAndProofAttestation(Box>), /// Gossipsub message providing notification of a `SingleAttestation` with its subnet id. @@ -135,6 +137,9 @@ impl PubsubMessage { PubsubMessage::DataColumnSidecar(column_sidecar_data) => { GossipKind::DataColumnSidecar(column_sidecar_data.0) } + PubsubMessage::ExecutionProofMessage(execution_proof_data) => { + GossipKind::ExecutionProof(execution_proof_data.0) + } PubsubMessage::AggregateAndProofAttestation(_) => GossipKind::BeaconAggregateAndProof, PubsubMessage::Attestation(attestation_data) => { GossipKind::Attestation(attestation_data.0) @@ -290,6 +295,15 @@ impl PubsubMessage { )), } } + GossipKind::ExecutionProof(subnet_id) => { + let execution_proof = Arc::new( + ExecutionProof::from_ssz_bytes(data).map_err(|e| format!("{:?}", e))?, + ); + Ok(PubsubMessage::ExecutionProofMessage(Box::new(( + *subnet_id, + execution_proof, + )))) + } GossipKind::VoluntaryExit => { let voluntary_exit = SignedVoluntaryExit::from_ssz_bytes(data) .map_err(|e| format!("{:?}", e))?; @@ -403,6 +417,7 @@ impl PubsubMessage { PubsubMessage::BeaconBlock(data) => data.as_ssz_bytes(), PubsubMessage::BlobSidecar(data) => data.1.as_ssz_bytes(), PubsubMessage::DataColumnSidecar(data) => data.1.as_ssz_bytes(), + PubsubMessage::ExecutionProofMessage(data) => data.1.as_ssz_bytes(), PubsubMessage::AggregateAndProofAttestation(data) => data.as_ssz_bytes(), PubsubMessage::VoluntaryExit(data) => data.as_ssz_bytes(), PubsubMessage::ProposerSlashing(data) => data.as_ssz_bytes(), @@ -438,6 +453,13 @@ impl std::fmt::Display for PubsubMessage { data.1.slot(), data.1.index, ), + PubsubMessage::ExecutionProofMessage(data) => write!( + f, + "ExecutionProof: subnet: {}, block_hash: {:?}, description: {}", + *data.0, + data.1.block_hash, + data.1.description(), + ), PubsubMessage::AggregateAndProofAttestation(att) => write!( f, "Aggregate and Proof: slot: {}, index: {:?}, aggregator_index: {}", diff --git a/beacon_node/lighthouse_network/src/types/subnet.rs b/beacon_node/lighthouse_network/src/types/subnet.rs index 1892dcc83af..9a9aff33050 100644 --- a/beacon_node/lighthouse_network/src/types/subnet.rs +++ b/beacon_node/lighthouse_network/src/types/subnet.rs @@ -1,6 +1,6 @@ use serde::Serialize; use std::time::Instant; -use types::{DataColumnSubnetId, SubnetId, SyncSubnetId}; +use types::{DataColumnSubnetId, ExecutionProofSubnetId, SubnetId, SyncSubnetId}; /// Represents a subnet on an attestation or sync committee `SubnetId`. /// @@ -14,6 +14,8 @@ pub enum Subnet { SyncCommittee(SyncSubnetId), /// Represents a gossipsub data column subnet. DataColumn(DataColumnSubnetId), + /// Represents a gossipsub execution proof subnet. + ExecutionProof(ExecutionProofSubnetId), } /// A subnet to discover peers on along with the instant after which it's no longer useful. 
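Aside: a note on how the `exproofs` ENR field is meant to be consumed alongside `Subnet::ExecutionProof`. The field is a plain `u8` bitfield, where bit `i` set means the peer participates in execution proof subnet `i`, so recovering a peer's advertised subnets is a bit-test against the protocol bound. The following is an illustrative sketch only; `decode_exproofs_bitfield` is a hypothetical helper, not a function in this diff:

// Illustrative sketch (not part of this diff): recovering a peer's execution
// proof subnets from the `exproofs` ENR value. Bit `i` set means the peer
// participates in subnet `i`; only IDs below MAX_EXECUTION_PROOF_SUBNETS are
// meaningful, so `ExecutionProofSubnetId::new` filters the rest out.
use types::execution_proof_subnet_id::{ExecutionProofSubnetId, MAX_EXECUTION_PROOF_SUBNETS};

fn decode_exproofs_bitfield(bitfield: u8) -> Vec<ExecutionProofSubnetId> {
    (0..MAX_EXECUTION_PROOF_SUBNETS)
        .filter(|id| (bitfield >> id) & 1 == 1)
        .filter_map(|id| ExecutionProofSubnetId::new(id).ok())
        .collect()
}

With the current values (stateless nodes advertise `0xFF` while `MAX_EXECUTION_PROOF_SUBNETS` is 1), this yields just subnet 0.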
diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs index b22adfbc487..039e3865501 100644 --- a/beacon_node/lighthouse_network/src/types/topics.rs +++ b/beacon_node/lighthouse_network/src/types/topics.rs @@ -2,7 +2,10 @@ use gossipsub::{IdentTopic as Topic, TopicHash}; use serde::{Deserialize, Serialize}; use std::collections::HashSet; use strum::AsRefStr; -use types::{ChainSpec, DataColumnSubnetId, EthSpec, ForkName, SubnetId, SyncSubnetId, Unsigned}; +use types::{ + ChainSpec, DataColumnSubnetId, EthSpec, ExecutionProofSubnetId, ForkName, SubnetId, + SyncSubnetId, Unsigned, execution_proof_subnet_id::MAX_EXECUTION_PROOF_SUBNETS, +}; use crate::Subnet; @@ -16,6 +19,7 @@ pub const BEACON_AGGREGATE_AND_PROOF_TOPIC: &str = "beacon_aggregate_and_proof"; pub const BEACON_ATTESTATION_PREFIX: &str = "beacon_attestation_"; pub const BLOB_SIDECAR_PREFIX: &str = "blob_sidecar_"; pub const DATA_COLUMN_SIDECAR_PREFIX: &str = "data_column_sidecar_"; +pub const EXECUTION_PROOF_PREFIX: &str = "execution_proof_"; pub const VOLUNTARY_EXIT_TOPIC: &str = "voluntary_exit"; pub const PROPOSER_SLASHING_TOPIC: &str = "proposer_slashing"; pub const ATTESTER_SLASHING_TOPIC: &str = "attester_slashing"; @@ -31,6 +35,7 @@ pub struct TopicConfig { pub subscribe_all_subnets: bool, pub subscribe_all_data_column_subnets: bool, pub sampling_subnets: HashSet, + pub stateless_validation: bool, } /// Returns all the topics the node should subscribe at `fork_name` @@ -91,6 +96,16 @@ pub fn core_topics_to_subscribe( } } + // Subscribe to all execution proof subnets when stateless validation is enabled + if opts.stateless_validation { + for subnet_id in 0..MAX_EXECUTION_PROOF_SUBNETS { + topics.push(GossipKind::ExecutionProof( + ExecutionProofSubnetId::new(subnet_id) + .expect("subnet_id is less than MAX_EXECUTION_PROOF_SUBNETS"), + )); + } + } + topics } @@ -115,7 +130,8 @@ pub fn is_fork_non_core_topic(topic: &GossipTopic, _fork_name: ForkName) -> bool | GossipKind::SignedContributionAndProof | GossipKind::BlsToExecutionChange | GossipKind::LightClientFinalityUpdate - | GossipKind::LightClientOptimisticUpdate => false, + | GossipKind::LightClientOptimisticUpdate + | GossipKind::ExecutionProof(_) => false, } } @@ -127,6 +143,7 @@ pub fn all_topics_at_fork(fork: ForkName, spec: &ChainSpec) -> Vec(fork, &opts, spec) } @@ -156,6 +173,9 @@ pub enum GossipKind { BlobSidecar(u64), /// Topic for publishing DataColumnSidecars. DataColumnSidecar(DataColumnSubnetId), + /// Topic for publishing execution payload proofs on a particular subnet. + #[strum(serialize = "execution_proof")] + ExecutionProof(ExecutionProofSubnetId), /// Topic for publishing raw attestations on a particular subnet. 
#[strum(serialize = "beacon_attestation")] Attestation(SubnetId), @@ -191,6 +211,9 @@ impl std::fmt::Display for GossipKind { GossipKind::DataColumnSidecar(column_subnet_id) => { write!(f, "{}{}", DATA_COLUMN_SIDECAR_PREFIX, **column_subnet_id) } + GossipKind::ExecutionProof(subnet_id) => { + write!(f, "{}{}", EXECUTION_PROOF_PREFIX, **subnet_id) + } x => f.write_str(x.as_ref()), } } @@ -279,6 +302,7 @@ impl GossipTopic { GossipKind::Attestation(subnet_id) => Some(Subnet::Attestation(*subnet_id)), GossipKind::SyncCommitteeMessage(subnet_id) => Some(Subnet::SyncCommittee(*subnet_id)), GossipKind::DataColumnSidecar(subnet_id) => Some(Subnet::DataColumn(*subnet_id)), + GossipKind::ExecutionProof(subnet_id) => Some(Subnet::ExecutionProof(*subnet_id)), _ => None, } } @@ -323,6 +347,9 @@ impl std::fmt::Display for GossipTopic { GossipKind::BlsToExecutionChange => BLS_TO_EXECUTION_CHANGE_TOPIC.into(), GossipKind::LightClientFinalityUpdate => LIGHT_CLIENT_FINALITY_UPDATE.into(), GossipKind::LightClientOptimisticUpdate => LIGHT_CLIENT_OPTIMISTIC_UPDATE.into(), + GossipKind::ExecutionProof(index) => { + format!("{}{}", EXECUTION_PROOF_PREFIX, *index) + } }; write!( f, @@ -341,6 +368,7 @@ impl From for GossipKind { Subnet::Attestation(s) => GossipKind::Attestation(s), Subnet::SyncCommittee(s) => GossipKind::SyncCommitteeMessage(s), Subnet::DataColumn(s) => GossipKind::DataColumnSidecar(s), + Subnet::ExecutionProof(s) => GossipKind::ExecutionProof(s), } } } @@ -368,6 +396,11 @@ fn subnet_topic_index(topic: &str) -> Option { return Some(GossipKind::DataColumnSidecar(DataColumnSubnetId::new( index.parse::().ok()?, ))); + } else if let Some(index) = topic.strip_prefix(EXECUTION_PROOF_PREFIX) { + let subnet_id = index.parse::().ok()?; + return ExecutionProofSubnetId::new(subnet_id) + .ok() + .map(GossipKind::ExecutionProof); } None } @@ -522,6 +555,7 @@ mod tests { subscribe_all_subnets: false, subscribe_all_data_column_subnets: false, sampling_subnets: sampling_subnets.clone(), + stateless_validation: false, } } diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 5fc94c29587..d32c6b0096d 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -7,6 +7,11 @@ use crate::{ use beacon_chain::blob_verification::{GossipBlobError, GossipVerifiedBlob}; use beacon_chain::block_verification_types::AsBlock; use beacon_chain::data_column_verification::{GossipDataColumnError, GossipVerifiedDataColumn}; +use beacon_chain::events::{EventKind, SseExecutionProof}; +use beacon_chain::execution_proof_network; +use beacon_chain::execution_proof_verification::{ + GossipExecutionProofError, GossipVerifiedExecutionProof, +}; use beacon_chain::store::Error; use beacon_chain::{ AvailabilityProcessingStatus, BeaconChainError, BeaconChainTypes, BlockError, ForkChoiceError, @@ -20,6 +25,7 @@ use beacon_chain::{ validator_monitor::{get_block_delay_ms, get_slot_delay_ms}, }; use beacon_processor::{Work, WorkEvent}; +use lighthouse_network::PubsubMessage; use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource}; use lighthouse_tracing::{ SPAN_PROCESS_GOSSIP_BLOB, SPAN_PROCESS_GOSSIP_BLOCK, SPAN_PROCESS_GOSSIP_DATA_COLUMN, @@ -37,10 +43,11 @@ use store::hot_cold_store::HotColdDBError; use tracing::{Instrument, Span, debug, error, info, instrument, trace, warn}; use types::{ Attestation, 
AttestationData, AttestationRef, AttesterSlashing, BlobSidecar, DataColumnSidecar,
-    DataColumnSubnetId, EthSpec, Hash256, IndexedAttestation, LightClientFinalityUpdate,
-    LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock,
-    SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, SingleAttestation,
-    Slot, SubnetId, SyncCommitteeMessage, SyncSubnetId, beacon_block::BlockImportSource,
+    DataColumnSubnetId, EthSpec, ExecPayload, ExecutionProof, ExecutionProofSubnetId, Hash256,
+    IndexedAttestation, LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing,
+    SignedAggregateAndProof, SignedBeaconBlock, SignedBlsToExecutionChange,
+    SignedContributionAndProof, SignedVoluntaryExit, SingleAttestation, Slot, SubnetId,
+    SyncCommitteeMessage, SyncSubnetId, beacon_block::BlockImportSource,
 };

 use beacon_processor::work_reprocessing_queue::QueuedColumnReconstruction;
@@ -1453,7 +1460,53 @@ impl NetworkBeaconProcessor {
                 }
                 None
             }
-            Ok(_) => Some(verified_block),
+            Ok(_) => {
+                // Generate execution proofs for gossip blocks if the node is
+                // "altruistic".
+                // TODO: For now, execution proofs don't have validator signatures,
+                // so anyone can create and submit proofs.
+                if self.chain.config.generate_execution_proofs {
+                    if let Ok(execution_payload) =
+                        verified_block.block.message().body().execution_payload()
+                    {
+                        info!(
+                            execution_block_hash = ?execution_payload.block_hash(),
+                            block_root = ?verified_block.block_root,
+                            "Spawning execution proof generation task for gossip block"
+                        );
+
+                        let network_tx_clone = self.network_tx.clone();
+                        let publish_fn =
+                            move |proof_id: types::ExecutionProofSubnetId,
+                                  proof: types::ExecutionProof| {
+                                let pubsub_message = PubsubMessage::ExecutionProofMessage(
+                                    Box::new((proof_id, Arc::new(proof))),
+                                );
+                                if let Err(e) =
+                                    network_tx_clone.send(crate::service::NetworkMessage::Publish {
+                                        messages: vec![pubsub_message],
+                                    })
+                                {
+                                    warn!(
+                                        subnet_id = *proof_id,
+                                        error = ?e,
+                                        "Failed to publish execution proof to gossip network"
+                                    );
+                                }
+                            };
+
+                        execution_proof_network::spawn_proof_generation_task_with_publishing(
+                            &self.chain,
+                            verified_block.block.message(),
+                            verified_block.block_root,
+                            publish_fn,
+                            "gossip_execution_proof_generation",
+                        );
+                    }
+                }
+
+                Some(verified_block)
+            }
             Err(e) => {
                 error!(
                     error = ?e,
@@ -3201,4 +3254,117 @@ impl NetworkBeaconProcessor {
             write_file(error_path, error.to_string().as_bytes());
         }
     }
+
+    /// Process a gossip execution proof message.
+    pub async fn process_gossip_execution_proof(
+        self: &Arc<Self>,
+        message_id: MessageId,
+        peer_id: PeerId,
+        _peer_client: Client,
+        subnet_id: ExecutionProofSubnetId,
+        execution_proof: Arc<ExecutionProof>,
+        _seen_duration: Duration,
+    ) {
+        let block_hash = execution_proof.block_hash;
+        let proof_description = execution_proof.description();
+        let subnet_id_u64 = *subnet_id;
+
+        debug!(
+            %block_hash,
+            %subnet_id,
+            description = %proof_description,
+            "Processing gossip execution proof"
+        );
+
+        // Create gossip verified execution proof (includes full verification)
+        let verified_proof = match GossipVerifiedExecutionProof::<T>::new(
+            execution_proof.clone(),
+            subnet_id,
+            &self.chain,
+        ) {
+            Ok(verified) => {
+                self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept);
+                verified
+            }
+            Err(e) => {
+                let penalty = match e {
+                    GossipExecutionProofError::InvalidProof { .. } => {
+                        Some(PeerAction::HighToleranceError)
+                    }
+                    GossipExecutionProofError::InvalidSubnetId { ..
} => { + Some(PeerAction::LowToleranceError) + } + GossipExecutionProofError::InvalidStructure { .. } => { + Some(PeerAction::LowToleranceError) + } + GossipExecutionProofError::FutureSlot { .. } + | GossipExecutionProofError::PastSlot { .. } + | GossipExecutionProofError::BeaconChainError(_) => None, + }; + let acceptance = if penalty.is_some() { + MessageAcceptance::Reject + } else { + MessageAcceptance::Ignore + }; + debug!(?block_hash, %subnet_id, error = ?e, "Rejecting gossip execution proof"); + self.propagate_validation_result(message_id, peer_id, acceptance); + if let Some(penalty) = penalty { + self.gossip_penalize_peer(peer_id, penalty, e.into()); + } + return; + } + }; + + // Store the verified proof in the data availability checker + let block_root = verified_proof.block_root(); + + match self + .chain + .check_gossip_execution_proof_availability_and_import( + block_root, + std::iter::once(verified_proof), + ) + .await + { + Ok(availability_status) => { + match availability_status { + AvailabilityProcessingStatus::Imported(imported_root) => { + debug!( + %imported_root, + "Imported fully available block after receiving execution proof" + ); + self.chain.recompute_head_at_current_slot().await; + self.send_sync_message(SyncMessage::GossipBlockProcessResult { + block_root: imported_root, + imported: true, + }); + } + AvailabilityProcessingStatus::MissingComponents(_, _) => { + debug!( + %block_root, + execution_block_hash = %block_hash, + subnet_id = subnet_id_u64, + "Execution proof stored, but block still missing other components" + ); + } + } + + if let Some(event_handler) = self.chain.event_handler.as_ref() { + if event_handler.has_execution_proof_subscribers() { + event_handler.register(EventKind::ExecutionProof( + SseExecutionProof::from_execution_proof(&execution_proof), + )); + } + } + } + Err(e) => { + warn!( + %block_hash, + subnet_id = %subnet_id_u64, + error = ?e, + "Failed to store execution proof in data availability checker" + ); + } + } + } } diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index 85ccde1d591..e6898d6e7ff 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -248,6 +248,36 @@ impl NetworkBeaconProcessor { }) } + /// Create a new `Work` event for some execution proof. + pub fn send_gossip_execution_proof( + self: &Arc, + message_id: MessageId, + peer_id: PeerId, + peer_client: Client, + subnet_id: ExecutionProofSubnetId, + execution_proof: Arc, + seen_timestamp: Duration, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = async move { + processor + .process_gossip_execution_proof( + message_id, + peer_id, + peer_client, + subnet_id, + execution_proof, + seen_timestamp, + ) + .await + }; + + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::GossipExecutionProof(Box::pin(process_fn)), + }) + } + /// Create a new `Work` event for some sync committee signature. 
pub fn send_gossip_sync_signature( self: &Arc, diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index 60fe094bb7c..33d9ba08fda 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -384,6 +384,19 @@ impl Router { ), ) } + PubsubMessage::ExecutionProofMessage(data) => { + let (subnet_id, execution_proof) = *data; + self.handle_beacon_processor_send_result( + self.network_beacon_processor.send_gossip_execution_proof( + message_id, + peer_id, + self.network_globals.client(&peer_id), + subnet_id, + execution_proof, + timestamp_now(), + ), + ) + } PubsubMessage::VoluntaryExit(exit) => { debug!(%peer_id, "Received a voluntary exit"); self.handle_beacon_processor_send_result( diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 4bd649ba824..a6f88351a23 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -640,6 +640,17 @@ impl NetworkService { if !topic_kinds.contains(&message.kind()) { topic_kinds.push(message.kind()); } + + // Add detailed debug for execution proof messages + if let PubsubMessage::ExecutionProofMessage(proof_box) = message { + let (subnet_id, proof) = proof_box.as_ref(); + info!( + execution_block_hash = ?proof.block_hash, + block_root = ?proof.block_root, + subnet_id = u64::from(subnet_id), + "Publishing execution proof message to libp2p" + ); + } } debug!( count = messages.len(), diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 2e3b3fde4b0..ab25204c08b 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -1476,6 +1476,45 @@ pub fn cli_app() -> Command { Lighthouse and only passed to the EL if initial verification fails.") .display_order(0) ) + .arg( + Arg::new("stateless-validation") + .long("stateless-validation") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .help("Enable stateless validation mode where all new payloads are marked as \ + optimistically valid without verification from the execution layer. This \ + bypasses normal payload validation and should only be used for testing.") + .display_order(0) + ) + .arg( + Arg::new("generate-execution-proofs") + .long("generate-execution-proofs") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .help("Generate execution proofs for all blocks (both produced and received). \ + This makes the node act as a proof generator for the network. \ + Cannot be used with --stateless-validation.") + .display_order(0) + ) + .arg( + Arg::new("stateless-min-proofs-required") + .long("stateless-min-proofs-required") + .value_name("COUNT") + .help("Minimum number of execution proofs required to consider a block valid in \ + stateless validation mode. Only applies when --stateless-validation is enabled. \ + Must be between 1 and max_execution_proof_subnets.") + .action(ArgAction::Set) + .display_order(0) + ) + .arg( + Arg::new("light-client-server") + .long("light-client-server") + .help("DEPRECATED") + .action(ArgAction::SetTrue) + + .help_heading(FLAG_HEADER) + .display_order(0) + ) .arg( Arg::new("disable-light-client-server") .long("disable-light-client-server") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index c2599ec0cd9..b30eeab3782 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -792,6 +792,56 @@ pub fn get_config( client_config.chain.optimistic_finalized_sync = !cli_args.get_flag("disable-optimistic-finalized-sync"); + // Stateless validation. 
+    client_config.chain.stateless_validation = cli_args.get_flag("stateless-validation");
+
+    // Copy the stateless validation setting to the network config.
+    client_config.network.stateless_validation = client_config.chain.stateless_validation;
+
+    // Stateless minimum proofs required.
+    if let Some(min_proofs) =
+        clap_utils::parse_optional::<usize>(cli_args, "stateless-min-proofs-required")?
+    {
+        // This block only runs when the flag was explicitly provided, and the flag
+        // is only meaningful when stateless validation is enabled.
+        if !client_config.chain.stateless_validation {
+            return Err(
+                "--stateless-min-proofs-required requires --stateless-validation to be enabled"
+                    .to_string(),
+            );
+        }
+        if min_proofs == 0 {
+            return Err("--stateless-min-proofs-required must be at least 1".to_string());
+        }
+        if min_proofs as u64 > client_config.chain.max_execution_proof_subnets {
+            return Err(format!(
+                "--stateless-min-proofs-required ({}) cannot exceed max_execution_proof_subnets ({})",
+                min_proofs, client_config.chain.max_execution_proof_subnets
+            ));
+        }
+        client_config.chain.stateless_min_proofs_required = min_proofs;
+    }
+
+    // Execution proof generation.
+    client_config.chain.generate_execution_proofs = cli_args.get_flag("generate-execution-proofs");
+
+    // Stateless nodes cannot generate proofs.
+    if client_config.chain.generate_execution_proofs && client_config.chain.stateless_validation {
+        return Err("The --generate-execution-proofs flag cannot be used with --stateless-validation. Stateless nodes cannot generate proofs.".to_string());
+    }
+
+    // Validate that the node's max_execution_proof_subnets doesn't exceed the protocol
+    // maximum (MAX_EXECUTION_PROOF_SUBNETS). Individual nodes can choose to participate
+    // in fewer subnets to reduce resource usage.
+    if client_config.chain.max_execution_proof_subnets
+        > types::execution_proof_subnet_id::MAX_EXECUTION_PROOF_SUBNETS
+    {
+        return Err(format!(
+            "Node's max_execution_proof_subnets ({}) cannot exceed protocol MAX_EXECUTION_PROOF_SUBNETS ({})",
+            client_config.chain.max_execution_proof_subnets,
+            types::execution_proof_subnet_id::MAX_EXECUTION_PROOF_SUBNETS
+        ));
+    }
+
     if cli_args.get_flag("genesis-backfill") {
         client_config.chain.genesis_backfill = true;
     }
diff --git a/book/src/help_bn.md b/book/src/help_bn.md
index 6680202a277..18b1934f841 100644
--- a/book/src/help_bn.md
+++ b/book/src/help_bn.md
@@ -382,6 +382,10 @@ Options:
         full [default: 1]
   --state-cache-size
         Specifies the size of the state cache [default: 128]
+  --stateless-min-proofs-required <COUNT>
+        Minimum number of execution proofs required to consider a block valid
+        in stateless validation mode. Only applies when --stateless-validation
+        is enabled. Must be between 1 and max_execution_proof_subnets.
   --suggested-fee-recipient
         Emergency fallback fee recipient for use in case the validator client
         does not have one configured. You should set this flag on the
@@ -495,6 +499,10 @@ Flags:
   --enable-private-discovery
         Lighthouse by default does not discover private IP addresses. Set
         this flag to enable connection attempts to local addresses.
+  --generate-execution-proofs
+        Generate execution proofs for all blocks (both produced and received).
+        This makes the node act as a proof generator for the network. Cannot
+        be used with --stateless-validation.
   --genesis-backfill
         Attempts to download blocks all the way back to genesis when
         checkpoint syncing.
@@ -563,6 +571,11 @@
         Standard option for a staking beacon node.
This will enable the HTTP server on localhost:5052 and import deposit logs from the execution node. + --stateless-validation + Enable stateless validation mode where all new payloads are marked as + optimistically valid without verification from the execution layer. + This bypasses normal payload validation and should only be used for + testing. --stdin-inputs If present, read all user inputs from stdin instead of tty. --subscribe-all-subnets diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 8f553b57d9c..549de1c811c 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -1051,6 +1051,27 @@ impl SseDataColumnSidecar { } } +#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] +pub struct SseExecutionProof { + pub block_root: Hash256, + pub block_hash: ExecutionBlockHash, + #[serde(with = "serde_utils::quoted_u64")] + pub subnet_id: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub version: u64, +} + +impl SseExecutionProof { + pub fn from_execution_proof(execution_proof: &ExecutionProof) -> SseExecutionProof { + SseExecutionProof { + block_root: execution_proof.block_root, + block_hash: execution_proof.block_hash, + subnet_id: *execution_proof.subnet_id, + version: execution_proof.version, + } + } +} + #[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] pub struct SseFinalizedCheckpoint { pub block: Hash256, @@ -1202,6 +1223,7 @@ pub enum EventKind { Block(SseBlock), BlobSidecar(SseBlobSidecar), DataColumnSidecar(SseDataColumnSidecar), + ExecutionProof(SseExecutionProof), FinalizedCheckpoint(SseFinalizedCheckpoint), Head(SseHead), VoluntaryExit(SignedVoluntaryExit), @@ -1226,6 +1248,7 @@ impl EventKind { EventKind::Block(_) => "block", EventKind::BlobSidecar(_) => "blob_sidecar", EventKind::DataColumnSidecar(_) => "data_column_sidecar", + EventKind::ExecutionProof(_) => "execution_proof", EventKind::Attestation(_) => "attestation", EventKind::SingleAttestation(_) => "single_attestation", EventKind::VoluntaryExit(_) => "voluntary_exit", @@ -1266,6 +1289,11 @@ impl EventKind { ServerError::InvalidServerSentEvent(format!("Data Column Sidecar: {:?}", e)) })?, )), + "execution_proof" => Ok(EventKind::ExecutionProof( + serde_json::from_str(data).map_err(|e| { + ServerError::InvalidServerSentEvent(format!("Execution Proof: {:?}", e)) + })?, + )), "chain_reorg" => Ok(EventKind::ChainReorg(serde_json::from_str(data).map_err( |e| ServerError::InvalidServerSentEvent(format!("Chain Reorg: {:?}", e)), )?)), @@ -1356,6 +1384,7 @@ pub enum EventTopic { Block, BlobSidecar, DataColumnSidecar, + ExecutionProof, Attestation, SingleAttestation, VoluntaryExit, @@ -1383,6 +1412,7 @@ impl FromStr for EventTopic { "block" => Ok(EventTopic::Block), "blob_sidecar" => Ok(EventTopic::BlobSidecar), "data_column_sidecar" => Ok(EventTopic::DataColumnSidecar), + "execution_proof" => Ok(EventTopic::ExecutionProof), "attestation" => Ok(EventTopic::Attestation), "single_attestation" => Ok(EventTopic::SingleAttestation), "voluntary_exit" => Ok(EventTopic::VoluntaryExit), @@ -1411,6 +1441,7 @@ impl fmt::Display for EventTopic { EventTopic::Block => write!(f, "block"), EventTopic::BlobSidecar => write!(f, "blob_sidecar"), EventTopic::DataColumnSidecar => write!(f, "data_column_sidecar"), + EventTopic::ExecutionProof => write!(f, "execution_proof"), EventTopic::Attestation => write!(f, "attestation"), EventTopic::SingleAttestation => write!(f, "single_attestation"), EventTopic::VoluntaryExit => write!(f, "voluntary_exit"), diff --git a/consensus/types/src/execution_proof.rs 
b/consensus/types/src/execution_proof.rs
new file mode 100644
index 00000000000..892976bea67
--- /dev/null
+++ b/consensus/types/src/execution_proof.rs
@@ -0,0 +1,141 @@
+//! Execution payload proof message for gossip.
+
+use crate::execution_proof_subnet_id::ExecutionProofSubnetId;
+use crate::{ExecutionBlockHash, Hash256};
+use serde::{Deserialize, Serialize};
+use ssz_derive::{Decode, Encode};
+
+/// Represents a proof for an execution payload.
+/// If this proof verifies, it is equivalent to the execution layer
+/// attesting that the payload is valid.
+/// Multiple proof types can exist for a single execution payload.
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Encode, Decode)]
+pub struct ExecutionProof {
+    /// The beacon block root this proof is for.
+    pub block_root: Hash256,
+    /// The execution block hash this proof attests to.
+    pub block_hash: ExecutionBlockHash,
+    /// The subnet ID where this proof was received/should be sent (maps to a gossip subnet).
+    pub subnet_id: ExecutionProofSubnetId,
+    /// Version of the proof format; lets one subnet upgrade its proof format without requiring all of them to.
+    pub version: u64,
+    /// Opaque proof data; its structure depends on `subnet_id` and `version`.
+    /// This will contain the cryptographic proofs received via gossip.
+    pub proof_data: Vec<u8>,
+}
+
+impl ExecutionProof {
+    /// Create a new execution proof for gossip.
+    pub fn new(
+        block_root: Hash256,
+        block_hash: ExecutionBlockHash,
+        subnet_id: ExecutionProofSubnetId,
+        version: u64,
+        proof_data: Vec<u8>,
+    ) -> Self {
+        Self {
+            block_root,
+            block_hash,
+            subnet_id,
+            version,
+            proof_data,
+        }
+    }
+
+    /// Get a description of the proof type based on `subnet_id`.
+    pub fn description(&self) -> String {
+        format!("proof id {}", *self.subnet_id)
+    }
+
+    /// Check if this proof version is supported.
+    pub fn is_version_supported(&self) -> bool {
+        // TODO(zkproofs): We want each subnet to be able to update its version independently;
+        // for now only version 1 is supported. Think about the best structure to use here,
+        // noting that there could be quite a lot of subnets if we consider the different
+        // zkVM and EL combos. So maybe the versioning comes from the middleware that verifies proofs.
+ matches!(self.version, 1) + } + + /// Validate basic structure of the proof + pub fn is_structurally_valid(&self) -> bool { + // Basic validation: non-empty proof data and supported version + !self.proof_data.is_empty() && self.is_version_supported() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::Hash256; + use ssz::{Decode, Encode}; + + #[test] + fn test_execution_proof_creation() { + let block_root = Hash256::random(); + let block_hash = ExecutionBlockHash::from(Hash256::random()); + let subnet_id = ExecutionProofSubnetId::new(0).unwrap(); + let proof_data = vec![1, 2, 3, 4]; + + let proof = ExecutionProof::new(block_root, block_hash, subnet_id, 1, proof_data.clone()); + + assert_eq!(proof.block_root, block_root); + assert_eq!(proof.block_hash, block_hash); + assert_eq!(proof.subnet_id, subnet_id); + assert_eq!(proof.version, 1); + assert_eq!(proof.proof_data, proof_data); + } + + #[test] + fn test_execution_proof_validation() { + let block_root = Hash256::random(); + let block_hash = ExecutionBlockHash::from(Hash256::random()); + let subnet_id = ExecutionProofSubnetId::new(0).unwrap(); + + // Valid proof + let valid_proof = ExecutionProof::new(block_root, block_hash, subnet_id, 1, vec![1, 2, 3]); + assert!(valid_proof.is_version_supported()); + assert!(valid_proof.is_structurally_valid()); + + // Invalid version + let invalid_version = + ExecutionProof::new(block_root, block_hash, subnet_id, 99, vec![1, 2, 3]); + assert!(!invalid_version.is_version_supported()); + assert!(!invalid_version.is_structurally_valid()); + + // Empty proof data + let empty_proof = ExecutionProof::new(block_root, block_hash, subnet_id, 1, vec![]); + assert!(empty_proof.is_version_supported()); + assert!(!empty_proof.is_structurally_valid()); + } + + #[test] + fn test_execution_proof_description() { + let block_root = Hash256::random(); + let block_hash = ExecutionBlockHash::from(Hash256::random()); + + let witness_proof = ExecutionProof::new( + block_root, + block_hash, + ExecutionProofSubnetId::new(0).unwrap(), + 1, + vec![1, 2, 3], + ); + assert_eq!(witness_proof.description(), "proof id 0"); + } + + #[test] + fn test_execution_proof_ssz_encoding() { + let block_root = Hash256::random(); + let block_hash = ExecutionBlockHash::from(Hash256::random()); + let subnet_id = ExecutionProofSubnetId::new(2).unwrap(); + let proof_data = vec![10, 20, 30, 40, 50]; + + let original = ExecutionProof::new(block_root, block_hash, subnet_id, 1, proof_data); + + // Test SSZ encoding and decoding + let encoded = original.as_ssz_bytes(); + let decoded = ExecutionProof::from_ssz_bytes(&encoded).expect("should decode successfully"); + + assert_eq!(original, decoded); + } +} diff --git a/consensus/types/src/execution_proof_subnet_id.rs b/consensus/types/src/execution_proof_subnet_id.rs new file mode 100644 index 00000000000..8d9050f0037 --- /dev/null +++ b/consensus/types/src/execution_proof_subnet_id.rs @@ -0,0 +1,133 @@ +//! Identifies each execution proof subnet by an integer identifier. +use serde::{Deserialize, Serialize}; +use ssz::{Decode, DecodeError, Encode}; +use std::fmt::{self, Display}; +use std::ops::{Deref, DerefMut}; + +/// Maximum number of execution proof subnets allowed by the protocol. +/// +/// This is a hard protocol limit that defines the total number of proof subnets +/// that can exist in the network. Individual nodes may choose to participate in +/// fewer subnets (configured via max_execution_proof_subnets in ChainConfig), +/// but no node can exceed this protocol maximum. 
+///
+/// Currently set to 1 subnet for the initial rollout. This will be expanded to 3-8 subnets
+/// in the future to support client and proof diversity (multiple independent proofs per block).
+pub const MAX_EXECUTION_PROOF_SUBNETS: u64 = 1;
+
+/// `ExecutionProofSubnetId` is both the ID of the subnet that a particular proof is gossiped on
+/// and the ID of the proof type itself, i.e. there is one type of proof per subnet.
+#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
+#[serde(transparent)]
+pub struct ExecutionProofSubnetId(#[serde(with = "serde_utils::quoted_u64")] u64);
+
+impl ExecutionProofSubnetId {
+    /// Create an `ExecutionProofSubnetId` from a u64, validating that it is within bounds.
+    ///
+    /// Note: the bound reflects the maximum number of subnets that can exist, which is also
+    /// the maximum number of proofs that we will accept.
+    pub fn new(id: u64) -> Result<Self, InvalidSubnetId> {
+        if id >= MAX_EXECUTION_PROOF_SUBNETS {
+            return Err(InvalidSubnetId(id));
+        }
+        Ok(Self(id))
+    }
+}
+
+impl Display for ExecutionProofSubnetId {
+    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+        write!(f, "{}", self.0)
+    }
+}
+
+impl Deref for ExecutionProofSubnetId {
+    type Target = u64;
+
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
+
+impl DerefMut for ExecutionProofSubnetId {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.0
+    }
+}
+
+impl From<ExecutionProofSubnetId> for u64 {
+    fn from(val: ExecutionProofSubnetId) -> Self {
+        val.0
+    }
+}
+
+impl From<&ExecutionProofSubnetId> for u64 {
+    fn from(val: &ExecutionProofSubnetId) -> Self {
+        val.0
+    }
+}
+
+#[derive(Debug)]
+pub struct InvalidSubnetId(pub u64);
+
+impl std::fmt::Display for InvalidSubnetId {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(
+            f,
+            "Invalid execution proof subnet ID: {}, must be < {}",
+            self.0, MAX_EXECUTION_PROOF_SUBNETS
+        )
+    }
+}
+
+impl std::error::Error for InvalidSubnetId {}
+
+// Manual SSZ implementations for ExecutionProofSubnetId
+impl Encode for ExecutionProofSubnetId {
+    fn is_ssz_fixed_len() -> bool {
+        <u64 as Encode>::is_ssz_fixed_len()
+    }
+
+    fn ssz_fixed_len() -> usize {
+        <u64 as Encode>::ssz_fixed_len()
+    }
+
+    fn ssz_bytes_len(&self) -> usize {
+        self.0.ssz_bytes_len()
+    }
+
+    fn ssz_append(&self, buf: &mut Vec<u8>) {
+        self.0.ssz_append(buf)
+    }
+}
+
+impl Decode for ExecutionProofSubnetId {
+    fn is_ssz_fixed_len() -> bool {
+        <u64 as Decode>::is_ssz_fixed_len()
+    }
+
+    fn ssz_fixed_len() -> usize {
+        <u64 as Decode>::ssz_fixed_len()
+    }
+
+    fn from_ssz_bytes(bytes: &[u8]) -> Result<Self, DecodeError> {
+        u64::from_ssz_bytes(bytes).map(Self)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_execution_proof_subnet_id_creation() {
+        for id in 0..MAX_EXECUTION_PROOF_SUBNETS {
+            let subnet_id = ExecutionProofSubnetId::new(id).unwrap();
+            assert_eq!(*subnet_id, id);
+        }
+
+        assert!(ExecutionProofSubnetId::new(0).is_ok());
+        assert!(ExecutionProofSubnetId::new(MAX_EXECUTION_PROOF_SUBNETS).is_err());
+        assert!(ExecutionProofSubnetId::new(u64::MAX).is_err());
+    }
+}
diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs
index 8e83fed1d9a..b27e9cd9871 100644
--- a/consensus/types/src/lib.rs
+++ b/consensus/types/src/lib.rs
@@ -107,6 +107,8 @@ pub mod blob_sidecar;
 pub mod data_column_custody_group;
 pub mod data_column_sidecar;
 pub mod data_column_subnet_id;
+pub mod execution_proof;
+pub mod execution_proof_subnet_id;
 pub mod light_client_header;
 pub mod non_zero_usize;
 pub mod runtime_fixed_vector;
@@ -177,6 +179,8 @@ pub use crate::execution_payload_header::{
     ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu,
     ExecutionPayloadHeaderGloas, ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut,
 };
+pub use crate::execution_proof::ExecutionProof;
+pub use crate::execution_proof_subnet_id::ExecutionProofSubnetId;
 pub use crate::execution_requests::{ExecutionRequests, RequestType};
 pub use crate::fork::Fork;
 pub use crate::fork_context::ForkContext;
diff --git a/scripts/local_testnet/network_params.yaml b/scripts/local_testnet/network_params.yaml
index cdfacbced4b..100ba0af8d6 100644
--- a/scripts/local_testnet/network_params.yaml
+++ b/scripts/local_testnet/network_params.yaml
@@ -1,12 +1,32 @@
 # Full configuration reference [here](https://github.com/ethpandaops/ethereum-package?tab=readme-ov-file#configuration).
 participants:
+  # Stateful proof generator nodes (generate proofs for the network)
   - el_type: geth
     el_image: ethereum/client-go:latest
     cl_type: lighthouse
     cl_image: lighthouse:local
     cl_extra_params:
       - --target-peers=3
-    count: 4
+      - --generate-execution-proofs
+    count: 1
+  # Stateless validator nodes (consume proofs but cannot generate them)
+  - el_type: geth
+    el_image: ethereum/client-go:latest
+    cl_type: lighthouse
+    cl_image: lighthouse:local
+    cl_extra_params:
+      - --target-peers=3
+      - --stateless-validation
+      - --stateless-min-proofs-required=3
+    count: 2
+  # Regular stateful nodes (normal operation, no proof generation)
+  - el_type: geth
+    el_image: ethereum/client-go:latest
+    cl_type: lighthouse
+    cl_image: lighthouse:local
+    cl_extra_params:
+      - --target-peers=3
+    count: 1
 network_params:
   electra_fork_epoch: 0
   seconds_per_slot: 3
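
Taken together, the chain config, gossip plumbing, and availability checker amount to a k-of-n import rule for stateless nodes: a block's payload counts as proven once proofs from `stateless_min_proofs_required` distinct subnets have been gossip-verified. The sketch below is a hypothetical distillation of that rule (`payload_proven` is not a function in this diff), assuming at most one verified proof per subnet, which `ExecutionProofSubnetId` enforces by construction:

// Hypothetical sketch of the k-of-n rule implied by `stateless_min_proofs_required`:
// the payload is treated as proven once proofs from enough distinct subnets have
// been gossip-verified for the block. `ExecutionProofSubnetId` derives `Hash`/`Eq`,
// so a `HashSet` deduplicates proofs per subnet for free.
use std::collections::HashSet;
use types::ExecutionProofSubnetId;

fn payload_proven(
    verified_subnets: &HashSet<ExecutionProofSubnetId>,
    min_required: usize,
) -> bool {
    // One proof type per subnet, so distinct subnet IDs act as
    // independent attestations to the same execution payload.
    verified_subnets.len() >= min_required
}

Note that the testnet config above sets --stateless-min-proofs-required=3, a threshold that cannot be met while MAX_EXECUTION_PROOF_SUBNETS is still 1; it only becomes satisfiable once more proof subnets are enabled.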
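For SSE consumers, the new `execution_proof` event topic emits `SseExecutionProof` as JSON, with `subnet_id` and `version` serialized as quoted u64s. A subscriber to `/eth/v1/events?topics=execution_proof` should therefore see frames shaped roughly like the following (the hashes are illustrative placeholders):

event: execution_proof
data: {"block_root":"0x51…","block_hash":"0xab…","subnet_id":"0","version":"1"}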