diff --git a/Cargo.lock b/Cargo.lock index a342d3c064..c7a35e5d0f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -274,6 +274,7 @@ dependencies = [ "solana-version", "solana-vote", "solana-vote-program", + "solana-votor-messages", "thiserror 2.0.16", "tikv-jemallocator", "tokio", @@ -8805,6 +8806,7 @@ dependencies = [ "solana-vote", "solana-vote-interface", "solana-vote-program", + "solana-votor-messages", "static_assertions", "test-case", "thiserror 2.0.16", @@ -11558,6 +11560,7 @@ dependencies = [ "solana-transaction-error", "solana-turbine", "solana-votor", + "solana-votor-messages", "static_assertions", "test-case", "thiserror 2.0.16", diff --git a/core/benches/shredder.rs b/core/benches/shredder.rs index 553e2b86cd..7dfce70dd2 100644 --- a/core/benches/shredder.rs +++ b/core/benches/shredder.rs @@ -14,6 +14,7 @@ use { CODING_SHREDS_PER_FEC_BLOCK, DATA_SHREDS_PER_FEC_BLOCK, }, solana_perf::test_tx, + solana_votor_messages::SliceRoot, test::{black_box, Bencher}, }; @@ -43,7 +44,7 @@ fn bench_shredder_ticks(bencher: &mut Bencher) { let num_ticks = max_ticks_per_n_shreds(1, Some(SHRED_SIZE_TYPICAL)) * num_shreds as u64; let entries = create_ticks(num_ticks, 0, Hash::default()); let reed_solomon_cache = ReedSolomonCache::default(); - let chained_merkle_root = Some(Hash::new_from_array(rand::thread_rng().gen())); + let chained_merkle_root = Some(SliceRoot(Hash::new_from_array(rand::thread_rng().gen()))); bencher.iter(|| { let shredder = Shredder::new(1, 0, 0, 0).unwrap(); shredder.entries_to_merkle_shreds_for_tests( @@ -71,7 +72,7 @@ fn bench_shredder_large_entries(bencher: &mut Bencher) { Some(shred_size), ); let entries = make_large_unchained_entries(txs_per_entry, num_entries); - let chained_merkle_root = Some(Hash::new_from_array(rand::thread_rng().gen())); + let chained_merkle_root = Some(SliceRoot(Hash::new_from_array(rand::thread_rng().gen()))); let reed_solomon_cache = ReedSolomonCache::default(); // 1Mb bencher.iter(|| { @@ -98,7 +99,7 @@ fn bench_deshredder(bencher: &mut Bencher) { let num_ticks = max_ticks_per_n_shreds(1, Some(shred_size)) * num_shreds as u64; let entries = create_ticks(num_ticks, 0, Hash::default()); let shredder = Shredder::new(1, 0, 0, 0).unwrap(); - let chained_merkle_root = Some(Hash::new_from_array(rand::thread_rng().gen())); + let chained_merkle_root = Some(SliceRoot(Hash::new_from_array(rand::thread_rng().gen()))); let (data_shreds, _) = shredder.entries_to_merkle_shreds_for_tests( &kp, &entries, @@ -120,7 +121,7 @@ fn bench_deshredder(bencher: &mut Bencher) { fn bench_deserialize_hdr(bencher: &mut Bencher) { let keypair = Keypair::new(); let shredder = Shredder::new(2, 1, 0, 0).unwrap(); - let merkle_root = Some(Hash::new_from_array(rand::thread_rng().gen())); + let merkle_root = Some(SliceRoot(Hash::new_from_array(rand::thread_rng().gen()))); let mut stats = ProcessShredsStats::default(); let reed_solomon_cache = ReedSolomonCache::default(); let mut shreds = shredder @@ -155,7 +156,7 @@ fn bench_shredder_coding(bencher: &mut Bencher) { let entries = make_entries(); let shredder = Shredder::new(1, 0, 0, 0).unwrap(); let reed_solomon_cache = ReedSolomonCache::default(); - let merkle_root = Some(Hash::new_from_array(rand::thread_rng().gen())); + let merkle_root = Some(SliceRoot(Hash::new_from_array(rand::thread_rng().gen()))); bencher.iter(|| { let result: Vec<_> = shredder .make_merkle_shreds_from_entries( @@ -178,7 +179,7 @@ fn bench_shredder_decoding(bencher: &mut Bencher) { let entries = make_entries(); let shredder = Shredder::new(1, 0, 0, 0).unwrap(); 
let reed_solomon_cache = ReedSolomonCache::default(); - let merkle_root = Some(Hash::new_from_array(rand::thread_rng().gen())); + let merkle_root = Some(SliceRoot(Hash::new_from_array(rand::thread_rng().gen()))); let (_data_shreds, mut coding_shreds): (Vec<_>, Vec<_>) = shredder .make_merkle_shreds_from_entries( &Keypair::new(), diff --git a/core/src/consensus.rs b/core/src/consensus.rs index 1fab8e312e..5b4fdee5b1 100644 --- a/core/src/consensus.rs +++ b/core/src/consensus.rs @@ -41,6 +41,7 @@ use { MAX_LOCKOUT_HISTORY, }, }, + solana_votor_messages::SliceRoot, std::{ cmp::Ordering, collections::{HashMap, HashSet}, @@ -616,19 +617,19 @@ impl Tower { pub fn record_bank_vote(&mut self, bank: &Bank) -> Option { // Returns the new root if one is made after applying a vote for the given bank to // `self.vote_state` - let block_id = bank.block_id().unwrap_or_else(|| { + let chained_merkle_id = bank.chained_merkle_id().unwrap_or_else(|| { // This can only happen for our leader bank // Note: since the new shred format is yet to be rolled out to all clusters, // this can also happen for non-leader banks. Once rolled out we can assert // here that this is our leader bank. - Hash::default() + SliceRoot(Hash::default()) }); self.record_bank_vote_and_update_lockouts( bank.slot(), bank.hash(), bank.feature_set .is_active(&agave_feature_set::enable_tower_sync_ix::id()), - block_id, + chained_merkle_id, ) } @@ -638,8 +639,9 @@ impl Tower { &mut self, vote_hash: Hash, enable_tower_sync_ix: bool, - block_id: Hash, + chained_merkle_id: SliceRoot, ) { + let block_id = chained_merkle_id.0; let mut new_vote = if enable_tower_sync_ix { VoteTransaction::from(TowerSync::new( self.vote_state.votes.clone(), @@ -664,7 +666,7 @@ impl Tower { vote_slot: Slot, vote_hash: Hash, enable_tower_sync_ix: bool, - block_id: Hash, + chained_merkle_id: SliceRoot, ) -> Option { if let Some(last_voted_slot) = self.vote_state.last_voted_slot() { if vote_slot <= last_voted_slot { @@ -681,7 +683,7 @@ impl Tower { let old_root = self.root(); self.vote_state.process_next_vote_slot(vote_slot); - self.update_last_vote_from_vote_state(vote_hash, enable_tower_sync_ix, block_id); + self.update_last_vote_from_vote_state(vote_hash, enable_tower_sync_ix, chained_merkle_id); let new_root = self.root(); @@ -699,7 +701,7 @@ impl Tower { #[cfg(feature = "dev-context-only-utils")] pub fn record_vote(&mut self, slot: Slot, hash: Hash) -> Option { - self.record_bank_vote_and_update_lockouts(slot, hash, true, Hash::default()) + self.record_bank_vote_and_update_lockouts(slot, hash, true, SliceRoot(Hash::default())) } #[cfg(feature = "dev-context-only-utils")] diff --git a/core/src/repair/repair_weighted_traversal.rs b/core/src/repair/repair_weighted_traversal.rs index 97100d4e47..29a2f4f5b8 100644 --- a/core/src/repair/repair_weighted_traversal.rs +++ b/core/src/repair/repair_weighted_traversal.rs @@ -148,6 +148,7 @@ pub mod test { shred::{ProcessShredsStats, ReedSolomonCache, Shred, Shredder}, }, solana_runtime::bank_utils, + solana_votor_messages::SliceRoot, trees::tr, }; @@ -293,7 +294,7 @@ pub mod test { &keypair, &[], true, - Some(Hash::default()), + Some(SliceRoot(Hash::default())), last_shred as u32, last_shred as u32, &reed_solomon_cache, diff --git a/core/src/repair/serve_repair.rs b/core/src/repair/serve_repair.rs index 2c678d6c88..bedeaca215 100644 --- a/core/src/repair/serve_repair.rs +++ b/core/src/repair/serve_repair.rs @@ -1556,6 +1556,7 @@ mod tests { solana_runtime::bank::Bank, solana_streamer::socket::SocketAddrSpace, 
solana_time_utils::timestamp, + solana_votor_messages::SliceRoot, std::{io::Cursor, net::Ipv4Addr}, }; @@ -2035,7 +2036,7 @@ mod tests { &keypair, &[], true, - Some(Hash::default()), + Some(SliceRoot(Hash::default())), index as u32, index as u32, &reed_solomon_cache, @@ -2516,7 +2517,7 @@ mod tests { &keypair, &[], true, - Some(Hash::default()), + Some(SliceRoot(Hash::default())), 0, 0, &reed_solomon_cache, diff --git a/core/src/repair/standard_repair_handler.rs b/core/src/repair/standard_repair_handler.rs index 7ea5198f30..c47baa5cfa 100644 --- a/core/src/repair/standard_repair_handler.rs +++ b/core/src/repair/standard_repair_handler.rs @@ -96,13 +96,13 @@ impl StandardRepairHandler { let get_parent_location_meta = |(location, meta): &(BlockLocation, SlotMeta)| { let parent_slot = meta.parent_slot?; - let parent_block_id = self + let parent_chained_merkle_id = self .blockstore .get_parent_block_id_from_location(meta.slot, *location) .ok()??; let parent_location = self .blockstore - .get_block_location(parent_slot, parent_block_id) + .get_block_location(parent_slot, parent_chained_merkle_id.0) .ok()??; let parent_meta = self .blockstore diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 7bb1fdd352..55a36297b8 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -90,7 +90,7 @@ use { voting_utils::GenerateVoteTxResult, votor::{LeaderWindowNotifier, Votor, VotorConfig}, }, - solana_votor_messages::consensus_message::ConsensusMessage, + solana_votor_messages::{consensus_message::ConsensusMessage, SliceRoot}, std::{ collections::{HashMap, HashSet}, num::{NonZeroUsize, Saturating}, @@ -3440,17 +3440,19 @@ impl ReplayStage { // If the block does not have at least DATA_SHREDS_PER_FEC_BLOCK correctly retransmitted // shreds in the last FEC set, mark it dead. let is_leader_block = bank.collector_id() == my_pubkey; - let block_id = match blockstore.check_last_fec_set_and_get_block_id( - bank.slot(), - bank.hash(), - false, - &bank.feature_set, - ) { - Ok(block_id) => block_id, + let (chained_merkle_id, ag_id) = match blockstore + .check_last_fec_set_and_get_block_ids( + bank.slot(), + bank.parent_ag_id(), + bank.hash(), + false, + &bank.feature_set, + ) { + Ok((chained_merkle_id, ag_id)) => (chained_merkle_id, ag_id), Err(result_err) => { if is_leader_block { // Our leader block has not finished shredding - None + (None, None) } else { let root = bank_forks.read().unwrap().root(); Self::mark_dead_slot( @@ -3471,8 +3473,11 @@ impl ReplayStage { } }; - if bank.block_id().is_none() { - bank.set_block_id(block_id); + if bank.chained_merkle_id().is_none() { + bank.set_chained_merkle_id(chained_merkle_id); + } + if bank.alpenglow_block_id().is_none() { + bank.set_alpenglow_block_id(ag_id); } let r_replay_stats = replay_stats.read().unwrap(); @@ -3579,7 +3584,7 @@ impl ReplayStage { // 2) Shredding finishes before replay, we notify here // // For non leader banks (2) is always true, so notify here - if *is_alpenglow_migration_complete && bank.block_id().is_some() { + if *is_alpenglow_migration_complete && bank.chained_merkle_id().is_some() { // Leader blocks will not have a block id, broadcast stage will // take care of notifying the voting loop let _ = votor_event_sender.send(VotorEvent::Block(CompletedBlock { @@ -3953,7 +3958,7 @@ impl ReplayStage { // we must have the compatible versions of both duplicates in order to replay `bank` // successfully, so we are once again guaranteed that `bank_vote_state.last_voted_slot()` // is present in bank forks and progress map.
- let block_id = { + let chained_merkle_id = { // The block_id here will only be relevant if we need to refresh this last vote. let bank = bank_forks .read() @@ -3964,7 +3969,8 @@ impl ReplayStage { // that means that it was created from a different instance (hot spare setup or a previous restart), // and thus we must have replayed and set the block_id from the shreds. // Note: since the new shred format is not rolled out everywhere, we have to provide a default - bank.block_id().unwrap_or_default() + bank.chained_merkle_id() + .unwrap_or(SliceRoot(Hash::default())) }; tower.update_last_vote_from_vote_state( progress @@ -3972,7 +3978,7 @@ impl ReplayStage { .expect("Must exist for us to have frozen descendant"), bank.feature_set .is_active(&agave_feature_set::enable_tower_sync_ix::id()), - block_id, + chained_merkle_id, ); // Since we are updating our tower we need to update associated caches for previously computed // slots as well. @@ -4682,7 +4688,7 @@ pub(crate) mod tests { slot: Slot, ) -> Arc { let bank = Bank::new_from_parent(parent, collector_id, slot); - bank.set_block_id(Some(Hash::new_unique())); + bank.set_chained_merkle_id(Some(SliceRoot(Hash::new_unique()))); bank_forks .write() .unwrap() @@ -5295,7 +5301,7 @@ pub(crate) mod tests { &keypair, &gibberish, true, - Some(Hash::default()), + Some(SliceRoot(Hash::default())), 0, 0, &reed_solomon_cache, diff --git a/core/src/vote_simulator.rs b/core/src/vote_simulator.rs index 3f86aa7727..777ba61bbe 100644 --- a/core/src/vote_simulator.rs +++ b/core/src/vote_simulator.rs @@ -31,6 +31,7 @@ use { solana_signer::Signer, solana_vote::vote_transaction, solana_vote_program::vote_state::{Lockout, TowerSync}, + solana_votor_messages::SliceRoot, std::{ collections::{HashMap, HashSet, VecDeque}, sync::{Arc, RwLock}, @@ -151,7 +152,7 @@ impl VoteSimulator { new_bank.fill_bank_with_ticks_for_tests(); if !visit.node().has_no_child() || is_frozen { - new_bank.set_block_id(Some(Hash::new_unique())); + new_bank.set_chained_merkle_id(Some(SliceRoot(Hash::new_unique()))); new_bank.freeze(); self.progress .get_fork_stats_mut(new_bank.slot()) @@ -395,7 +396,7 @@ pub fn initialize_state( genesis_config.poh_config.hashes_per_tick = Some(2); let (bank0, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); - bank0.set_block_id(Some(Hash::new_unique())); + bank0.set_chained_merkle_id(Some(SliceRoot(Hash::new_unique()))); for pubkey in validator_keypairs_map.keys() { bank0.transfer(10_000, &mint_keypair, pubkey).unwrap(); diff --git a/core/src/window_service.rs b/core/src/window_service.rs index eaecf1150a..a4ff5323a3 100644 --- a/core/src/window_service.rs +++ b/core/src/window_service.rs @@ -480,6 +480,7 @@ mod test { solana_signer::Signer, solana_streamer::socket::SocketAddrSpace, solana_time_utils::timestamp, + solana_votor_messages::SliceRoot, }; fn local_entries_to_shred( @@ -494,7 +495,7 @@ mod test { entries, true, // is_last_in_slot // chained_merkle_root - Some(Hash::new_from_array(rand::thread_rng().gen())), + Some(SliceRoot(Hash::new_from_array(rand::thread_rng().gen()))), 0, // next_shred_index 0, // next_code_index &ReedSolomonCache::default(), diff --git a/gossip/Cargo.toml b/gossip/Cargo.toml index 7a315554c2..1fce5fb9cf 100644 --- a/gossip/Cargo.toml +++ b/gossip/Cargo.toml @@ -101,6 +101,7 @@ solana-transaction = "=3.0.1" solana-version = { workspace = true } solana-vote = { workspace = true } solana-vote-program = { workspace = true } +solana-votor-messages = { workspace = true } static_assertions = { workspace = true } 
thiserror = { workspace = true } diff --git a/gossip/src/duplicate_shred.rs b/gossip/src/duplicate_shred.rs index 9dfb753ca2..592882c9aa 100644 --- a/gossip/src/duplicate_shred.rs +++ b/gossip/src/duplicate_shred.rs @@ -347,6 +347,7 @@ pub(crate) mod tests { solana_signature::Signature, solana_signer::Signer, solana_system_transaction::transfer, + solana_votor_messages::SliceRoot, std::sync::Arc, }; @@ -457,7 +458,7 @@ pub(crate) mod tests { &entries, is_last_in_slot, // chained_merkle_root - Some(Hash::new_from_array(rng.gen())), + Some(SliceRoot(Hash::new_from_array(rng.gen()))), next_shred_index, next_code_index, // next_code_index &ReedSolomonCache::default(), diff --git a/ledger-tool/Cargo.toml b/ledger-tool/Cargo.toml index 43ab4a02fd..bd0a18874e 100644 --- a/ledger-tool/Cargo.toml +++ b/ledger-tool/Cargo.toml @@ -89,6 +89,7 @@ solana-unified-scheduler-pool = { workspace = true } solana-version = { workspace = true } solana-vote = { workspace = true } solana-vote-program = { workspace = true } +solana-votor-messages = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true, features = ["full"] } diff --git a/ledger-tool/src/output.rs b/ledger-tool/src/output.rs index 73e55154f5..05cd1fed7c 100644 --- a/ledger-tool/src/output.rs +++ b/ledger-tool/src/output.rs @@ -33,6 +33,7 @@ use { UiTransactionEncoding, VersionedConfirmedBlock, VersionedConfirmedBlockWithEntries, VersionedTransactionWithStatusMeta, }, + solana_votor_messages::SliceRoot, std::{ cell::RefCell, collections::HashMap, @@ -360,8 +361,8 @@ pub struct CliDuplicateShred { index: u32, shred_type: ShredType, version: u16, - merkle_root: Option, - chained_merkle_root: Option, + merkle_root: Option, + chained_merkle_root: Option, last_in_slot: bool, #[serde(with = "serde_bytes")] payload: Vec, diff --git a/ledger/benches/make_shreds_from_entries.rs b/ledger/benches/make_shreds_from_entries.rs index d0571b167e..f8fd5c577f 100644 --- a/ledger/benches/make_shreds_from_entries.rs +++ b/ledger/benches/make_shreds_from_entries.rs @@ -9,6 +9,7 @@ use { solana_packet::PACKET_DATA_SIZE, solana_pubkey::Pubkey, solana_transaction::Transaction, + solana_votor_messages::SliceRoot, std::iter::repeat_with, }; @@ -53,7 +54,7 @@ fn make_shreds_from_entries( keypair: &Keypair, entries: &[Entry], is_last_in_slot: bool, - chained_merkle_root: Option, + chained_merkle_root: Option, reed_solomon_cache: &ReedSolomonCache, stats: &mut ProcessShredsStats, ) -> (Vec, Vec) { @@ -89,7 +90,7 @@ fn run_make_shreds_from_entries( let keypair = Keypair::new(); let data_size = num_packets * PACKET_DATA_SIZE; let entries = make_dummy_entries(&mut rng, data_size); - let chained_merkle_root = Some(make_dummy_hash(&mut rng)); + let chained_merkle_root = Some(SliceRoot(make_dummy_hash(&mut rng))); let reed_solomon_cache = ReedSolomonCache::default(); let mut stats = ProcessShredsStats::default(); // Initialize the thread-pool and warm the Reed-Solomon cache. 
@@ -143,7 +144,7 @@ fn run_recover_shreds( let keypair = Keypair::new(); let data_size = num_packets * PACKET_DATA_SIZE; let entries = make_dummy_entries(&mut rng, data_size); - let chained_merkle_root = Some(make_dummy_hash(&mut rng)); + let chained_merkle_root = Some(SliceRoot(make_dummy_hash(&mut rng))); let reed_solomon_cache = ReedSolomonCache::default(); let mut stats = ProcessShredsStats::default(); let (data, code) = make_shreds_from_entries( diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index a7f9d86c27..424d1e8aad 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -18,8 +18,9 @@ use { leader_schedule_cache::LeaderScheduleCache, next_slots_iterator::NextSlotsIterator, shred::{ - self, ErasureSetId, ProcessShredsStats, ReedSolomonCache, Shred, ShredData, ShredId, - ShredType, Shredder, DATA_SHREDS_PER_FEC_BLOCK, + self, merkle_tree::make_double_merkle_tree, ErasureSetId, ProcessShredsStats, + ReedSolomonCache, Shred, ShredData, ShredId, ShredType, Shredder, + DATA_SHREDS_PER_FEC_BLOCK, }, slot_stats::{ShredSource, SlotsStats}, transaction_address_lookup_table_scanner::scan_transaction, @@ -63,6 +64,7 @@ use { VersionedConfirmedBlock, VersionedConfirmedBlockWithEntries, VersionedTransactionWithStatusMeta, }, + solana_votor_messages::{AlpenglowBlockId, SliceRoot}, std::{ borrow::Cow, cell::RefCell, @@ -194,7 +196,7 @@ impl AsRef for WorkingEntry { #[derive(Clone, Copy, PartialEq, Eq, Debug)] pub struct LastFECSetCheckResults { - last_fec_set_merkle_root: Option, + last_fec_set_merkle_root: Option, is_retransmitter_signed: bool, } @@ -202,7 +204,7 @@ impl LastFECSetCheckResults { fn get_last_fec_set_merkle_root( &self, feature_set: &FeatureSet, - ) -> std::result::Result, BlockstoreProcessorError> { + ) -> std::result::Result, BlockstoreProcessorError> { if self.last_fec_set_merkle_root.is_none() { return Err(BlockstoreProcessorError::IncompleteFinalFecSet); } else if feature_set @@ -608,7 +610,7 @@ impl Blockstore { &self, slot: Slot, location: BlockLocation, - ) -> Result> { + ) -> Result> { let Some(shred) = self.get_data_shred_from_location(slot, 0, location)? else { return Ok(None); }; @@ -2375,7 +2377,11 @@ impl Blockstore { /// Checks if the chained merkle root == merkle root /// /// Returns true if no conflict, or if chained merkle roots are not enabled - fn check_chaining(&self, merkle_root: Option, chained_merkle_root: Option) -> bool { + fn check_chaining( + &self, + merkle_root: Option, + chained_merkle_root: Option, + ) -> bool { chained_merkle_root.is_none() // Chained merkle roots have not been enabled yet || chained_merkle_root == merkle_root } @@ -2726,7 +2732,8 @@ impl Blockstore { let mut all_shreds = vec![]; let mut slot_entries = vec![]; let reed_solomon_cache = ReedSolomonCache::default(); - let mut chained_merkle_root = Some(Hash::new_from_array(rand::thread_rng().gen())); + let mut chained_merkle_root = + Some(SliceRoot(Hash::new_from_array(rand::thread_rng().gen()))); // Find all the entries for start_slot for entry in entries.into_iter() { if remaining_ticks_in_slot == 0 { @@ -4142,19 +4149,21 @@ impl Blockstore { self.get_slot_entries_in_block(slot, vec![range], slot_meta) } - /// Performs checks on the last fec set of a replayed slot, and returns the block_id. + /// Performs checks on the last fec set of a replayed slot, and returns: chained merkle id, alpenglow block_id. 
/// Returns: /// - BlockstoreProcessorError::IncompleteFinalFecSet /// if the last fec set is not full /// - BlockstoreProcessorError::InvalidRetransmitterSignatureFinalFecSet /// if the last fec set is not signed by retransmitters - pub fn check_last_fec_set_and_get_block_id( + pub fn check_last_fec_set_and_get_block_ids( &self, slot: Slot, + parent_ag_id: Option, bank_hash: Hash, is_leader: bool, feature_set: &FeatureSet, - ) -> std::result::Result, BlockstoreProcessorError> { + ) -> std::result::Result<(Option, Option), BlockstoreProcessorError> + { let results = self.check_last_fec_set(slot); let Ok(results) = results else { if !is_leader { @@ -4166,14 +4175,50 @@ impl Blockstore { if feature_set.is_active(&agave_feature_set::vote_only_full_fec_sets::id()) { return Err(BlockstoreProcessorError::IncompleteFinalFecSet); } - return Ok(None); + return Ok((None, None)); }; // Update metrics if results.last_fec_set_merkle_root.is_none() { datapoint_warn!("incomplete_final_fec_set", ("slot", slot, i64),); } + // Return block id / error based on feature flags - results.get_last_fec_set_merkle_root(feature_set) + let chained_merkle_id = results.get_last_fec_set_merkle_root(feature_set)?; + + let alpenglow_block_id = self.compute_alpenglow_block_id(slot, parent_ag_id)?; + + Ok((chained_merkle_id, alpenglow_block_id)) + } + + fn compute_alpenglow_block_id( + &self, + slot: Slot, + parent_block_ag_id: Option, + ) -> Result> { + let Some(parent_block_ag_id) = parent_block_ag_id else { + return Ok(None); + }; + let slot_meta = self.meta(slot)?.ok_or(BlockstoreError::SlotUnavailable)?; + let last_shred_index = slot_meta + .last_index + .ok_or(BlockstoreError::UnknownLastIndex(slot))?; + let roots: Vec<_> = (0..last_shred_index) + .map(|i| { + self.merkle_root_meta_from_location( + ErasureSetId::new(slot, i as u32), + BlockLocation::Original, + ) + .unwrap() + .unwrap() + .merkle_root() + .unwrap() + }) + .dedup() + .collect(); + let id = make_double_merkle_tree(roots.into_iter(), parent_block_ag_id) + .map_err(|_| BlockstoreError::SlotUnavailable)? + .block_id; + Ok(Some(id)) } /// Performs checks on the last FEC set for this slot. 
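
The new compute_alpenglow_block_id above derives a block id from the per-slice (per erasure set) merkle roots plus the parent block's id. For reference, a minimal self-contained sketch of that derivation follows; the names alpenglow_block_id, join, and merkle_root and the u64 stand-in hashes are assumptions for readability, while the actual code uses make_double_merkle_tree over SliceRoot / AlpenglowBlockId values with SHA-256 prefix hashing.

fn join(a: u64, b: u64) -> u64 {
    // Toy combiner standing in for hashv(MERKLE_HASH_PREFIX_NODE, ..).
    a.rotate_left(7) ^ b
}

fn merkle_root(mut level: Vec<u64>) -> u64 {
    // A node without a right sibling is paired with itself, as in the slice tree.
    while level.len() > 1 {
        level = level
            .chunks(2)
            .map(|pair| join(pair[0], *pair.last().unwrap()))
            .collect();
    }
    level[0]
}

fn alpenglow_block_id(slice_roots: &[u64], parent_block_id: u64) -> u64 {
    let mut leaves: Vec<u64> = Vec::new();
    // Consecutive duplicates collapse, mirroring the dedup() over merkle-root metas.
    for &root in slice_roots {
        if leaves.last() != Some(&root) {
            leaves.push(root);
        }
    }
    // The parent block id is chained in as the final leaf.
    leaves.push(parent_block_id);
    merkle_root(leaves)
}

fn main() {
    let slice_roots = [11u64, 11, 22, 33, 33];
    println!("toy block id: {}", alpenglow_block_id(&slice_roots, 99));
}
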
@@ -4218,7 +4263,7 @@ impl Blockstore { .data_shred_cf .multi_get_keys((start_index..=last_shred_index).map(|index| (slot, index))); - let deduped_shred_checks: Vec<(Hash, bool)> = self + let deduped_shred_checks: Vec<(SliceRoot, bool)> = self .data_shred_cf .multi_get_bytes(&keys) .enumerate() @@ -4243,17 +4288,18 @@ impl Blockstore { Ok((merkle_root, is_retransmitter_signed)) }) .dedup_by(|res1, res2| res1.as_ref().ok() == res2.as_ref().ok()) - .collect::>>()?; + .collect::>>()?; // After the dedup there should be exactly one Hash left and one true value - let &[(block_id, is_retransmitter_signed)] = deduped_shred_checks.as_slice() else { + let &[(chained_merkle_id, is_retransmitter_signed)] = deduped_shred_checks.as_slice() + else { return Ok(LastFECSetCheckResults { last_fec_set_merkle_root: None, is_retransmitter_signed: false, }); }; Ok(LastFECSetCheckResults { - last_fec_set_merkle_root: Some(block_id), + last_fec_set_merkle_root: Some(chained_merkle_id), is_retransmitter_signed, }) } @@ -5277,7 +5323,7 @@ pub fn create_new_ledger( &entries, true, // is_last_in_slot // chained_merkle_root - Some(Hash::new_from_array(rand::thread_rng().gen())), + Some(SliceRoot(Hash::new_from_array(rand::thread_rng().gen()))), 0, // next_shred_index 0, // next_code_index &ReedSolomonCache::default(), @@ -5510,7 +5556,7 @@ pub fn entries_to_test_shreds( entries, is_full_slot, // chained_merkle_root - Some(Hash::new_from_array(rand::thread_rng().gen())), + Some(SliceRoot(Hash::new_from_array(rand::thread_rng().gen()))), 0, // next_shred_index, 0, // next_code_index &ReedSolomonCache::default(), @@ -7335,7 +7381,7 @@ pub mod tests { &keypair, &[], false, - Some(Hash::default()), // merkle_root + Some(SliceRoot(Hash::default())), // merkle_root (i * gap) as u32, (i * gap) as u32, &reed_solomon_cache, @@ -7518,7 +7564,7 @@ pub mod tests { &keypair, &entries, true, - Some(Hash::default()), // merkle_root + Some(SliceRoot(Hash::default())), // merkle_root 0, 0, &rsc, @@ -7546,9 +7592,9 @@ pub mod tests { &keypair, &[], true, - Some(Hash::default()), // merkle_root - 6, // next_shred_index, - 6, // next_code_index + Some(SliceRoot(Hash::default())), // merkle_root + 6, // next_shred_index, + 6, // next_code_index &rsc, &mut ProcessShredsStats::default(), ) @@ -7611,9 +7657,9 @@ pub mod tests { &Keypair::new(), &entries, true, - Some(Hash::default()), // merkle_root - last_idx, // next_shred_index, - last_idx, // next_code_index + Some(SliceRoot(Hash::default())), // merkle_root + last_idx, // next_shred_index, + last_idx, // next_code_index &rsc, &mut ProcessShredsStats::default(), ) @@ -8012,7 +8058,7 @@ pub mod tests { &keypair, &[3, 3, 3], false, - Some(Hash::default()), + Some(SliceRoot(Hash::default())), new_index, new_index, &reed_solomon_cache, @@ -8348,7 +8394,7 @@ pub mod tests { &keypair, &[1, 1, 1], true, - Some(Hash::default()), + Some(SliceRoot(Hash::default())), next_shred_index as u32, next_shred_index as u32, &reed_solomon_cache, @@ -10630,7 +10676,7 @@ pub mod tests { parent_slot, num_entries, fec_set_index, - Some(Hash::new_from_array(rand::thread_rng().gen())), + Some(SliceRoot(Hash::new_from_array(rand::thread_rng().gen()))), ) } @@ -10639,7 +10685,7 @@ pub mod tests { parent_slot: u64, num_entries: u64, fec_set_index: u32, - chained_merkle_root: Option, + chained_merkle_root: Option, ) -> (Vec, Vec, Arc) { setup_erasure_shreds_with_index_and_chained_merkle_and_last_in_slot( slot, @@ -10656,7 +10702,7 @@ pub mod tests { parent_slot: u64, num_entries: u64, fec_set_index: u32, - 
chained_merkle_root: Option, + chained_merkle_root: Option, is_last_in_slot: bool, ) -> (Vec, Vec, Arc) { let entries = make_slot_entries_with_transactions(num_entries); @@ -10726,7 +10772,7 @@ pub mod tests { let leader_keypair = Arc::new(Keypair::new()); let reed_solomon_cache = ReedSolomonCache::default(); let shredder = Shredder::new(slot, 0, 0, 0).unwrap(); - let merkle_root = Some(Hash::new_from_array(rand::thread_rng().gen())); + let merkle_root = Some(SliceRoot(Hash::new_from_array(rand::thread_rng().gen()))); let (shreds, _) = shredder.entries_to_merkle_shreds_for_tests( &leader_keypair, &entries1, @@ -11086,7 +11132,7 @@ pub mod tests { let version = version_from_hash(&entries[0].hash); let shredder = Shredder::new(slot, 0, 0, version).unwrap(); let reed_solomon_cache = ReedSolomonCache::default(); - let merkle_root = Some(Hash::new_from_array(rand::thread_rng().gen())); + let merkle_root = Some(SliceRoot(Hash::new_from_array(rand::thread_rng().gen()))); let kp = Keypair::new(); // produce normal shreds let (data1, coding1) = shredder.entries_to_merkle_shreds_for_tests( @@ -11202,7 +11248,7 @@ pub mod tests { &leader_keypair, &entries, true, // is_last_in_slot - Some(Hash::new_unique()), + Some(SliceRoot(Hash::new_unique())), 0, // next_shred_index 0, // next_code_index, &reed_solomon_cache, @@ -11693,7 +11739,7 @@ pub mod tests { .is_empty()); // Incorrectly chained merkle for next slot - let merkle_root = Hash::new_unique(); + let merkle_root = SliceRoot(Hash::new_unique()); assert!(merkle_root != data_shred.merkle_root().unwrap()); let (next_slot_data_shreds, next_slot_coding_shreds, leader_schedule) = setup_erasure_shreds_with_index_and_chained_merkle( @@ -11726,7 +11772,7 @@ pub mod tests { let coding_shred = coding_shreds[0].clone(); // Incorrectly chained merkle for next slot - let merkle_root = Hash::new_unique(); + let merkle_root = SliceRoot(Hash::new_unique()); assert!(merkle_root != coding_shred.merkle_root().unwrap()); let (next_slot_data_shreds, _, leader_schedule) = setup_erasure_shreds_with_index_and_chained_merkle( @@ -11767,7 +11813,7 @@ pub mod tests { .is_empty()); // Incorrectly chained merkle - let merkle_root = Hash::new_unique(); + let merkle_root = SliceRoot(Hash::new_unique()); assert!(merkle_root != coding_shred_previous.merkle_root().unwrap()); let (data_shreds, coding_shreds, leader_schedule) = setup_erasure_shreds_with_index_and_chained_merkle( @@ -11815,7 +11861,7 @@ pub mod tests { .is_empty()); // Incorrectly chained merkle - let merkle_root = Hash::new_unique(); + let merkle_root = SliceRoot(Hash::new_unique()); assert!(merkle_root != coding_shred_previous.merkle_root().unwrap()); let (data_shreds, coding_shreds, leader_schedule) = setup_erasure_shreds_with_index_and_chained_merkle( @@ -11859,7 +11905,7 @@ pub mod tests { let next_fec_set_index = fec_set_index + data_shreds.len() as u32; // Incorrectly chained merkle - let merkle_root = Hash::new_unique(); + let merkle_root = SliceRoot(Hash::new_unique()); assert!(merkle_root != coding_shred.merkle_root().unwrap()); let (next_data_shreds, _, leader_schedule_next) = setup_erasure_shreds_with_index_and_chained_merkle( @@ -11906,7 +11952,7 @@ pub mod tests { let fec_set_index = prev_fec_set_index + prev_data_shreds.len() as u32; // Incorrectly chained merkle - let merkle_root = Hash::new_unique(); + let merkle_root = SliceRoot(Hash::new_unique()); assert!(merkle_root != prev_coding_shred.merkle_root().unwrap()); let (data_shreds, coding_shreds, leader_schedule) = 
setup_erasure_shreds_with_index_and_chained_merkle( @@ -11921,7 +11967,7 @@ pub mod tests { let next_fec_set_index = fec_set_index + prev_data_shreds.len() as u32; // Incorrectly chained merkle - let merkle_root = Hash::new_unique(); + let merkle_root = SliceRoot(Hash::new_unique()); assert!(merkle_root != data_shred.merkle_root().unwrap()); let (next_data_shreds, _, leader_schedule_next) = setup_erasure_shreds_with_index_and_chained_merkle( @@ -12011,7 +12057,7 @@ pub mod tests { // Add an incorrectly chained merkle from the next set. Although incorrectly chained // we skip the duplicate check as the first received coding shred index shred is missing - let merkle_root = Hash::new_unique(); + let merkle_root = SliceRoot(Hash::new_unique()); assert!(merkle_root != coding_shred_previous.merkle_root().unwrap()); let (data_shreds, coding_shreds, leader_schedule) = setup_erasure_shreds_with_index_and_chained_merkle( @@ -12046,7 +12092,7 @@ pub mod tests { let next_fec_set_index = fec_set_index + data_shreds.len() as u32; // Incorrectly chained merkle - let merkle_root = Hash::new_unique(); + let merkle_root = SliceRoot(Hash::new_unique()); assert!(merkle_root != coding_shred.merkle_root().unwrap()); let (next_data_shreds, next_coding_shreds, leader_schedule_next) = setup_erasure_shreds_with_index_and_chained_merkle( @@ -12263,7 +12309,7 @@ pub mod tests { Err(BlockstoreProcessorError::IncompleteFinalFecSet) ); - let block_id = Hash::new_unique(); + let block_id = SliceRoot(Hash::new_unique()); let results = LastFECSetCheckResults { last_fec_set_merkle_root: Some(block_id), is_retransmitter_signed: false, @@ -12290,7 +12336,7 @@ pub mod tests { Err(BlockstoreProcessorError::IncompleteFinalFecSet) ); - let block_id = Hash::new_unique(); + let block_id = SliceRoot(Hash::new_unique()); let results = LastFECSetCheckResults { last_fec_set_merkle_root: Some(block_id), is_retransmitter_signed: true, diff --git a/ledger/src/blockstore_meta.rs b/ledger/src/blockstore_meta.rs index 9ad2c1aebb..09366466ef 100644 --- a/ledger/src/blockstore_meta.rs +++ b/ledger/src/blockstore_meta.rs @@ -9,6 +9,7 @@ use { serde::{Deserialize, Deserializer, Serialize, Serializer}, solana_clock::{Slot, UnixTimestamp}, solana_hash::Hash, + solana_votor_messages::SliceRoot, std::{ collections::BTreeSet, ops::{Range, RangeBounds}, @@ -396,7 +397,7 @@ pub(crate) struct ErasureConfig { #[derive(Clone, Copy, Debug, Eq, PartialEq, Serialize, Deserialize)] pub struct MerkleRootMeta { /// The merkle root, `None` for legacy shreds - merkle_root: Option, + merkle_root: Option, /// The first received shred index first_received_shred_index: u32, /// The shred type of the first received shred @@ -872,7 +873,7 @@ impl MerkleRootMeta { } } - pub(crate) fn merkle_root(&self) -> Option { + pub(crate) fn merkle_root(&self) -> Option { self.merkle_root } diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index 59a595f56f..6c80b0c133 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -2228,8 +2228,15 @@ pub fn process_single_slot( result? 
} - let block_id = blockstore - .check_last_fec_set_and_get_block_id(slot, bank.hash(), false, &bank.feature_set) + let parent_ag_id = bank.parent_alpenglow_block_id(); + let (chained_merkle_id, alpenglow_block_id) = blockstore + .check_last_fec_set_and_get_block_ids( + slot, + parent_ag_id, + bank.hash(), + false, + &bank.feature_set, + ) .inspect_err(|err| { warn!("slot {slot} failed last fec set checks: {err}"); if blockstore.is_primary_access() { @@ -2243,7 +2250,8 @@ pub fn process_single_slot( ); } })?; - bank.set_block_id(block_id); + bank.set_chained_merkle_id(chained_merkle_id); + bank.set_alpenglow_block_id(alpenglow_block_id); bank.freeze(); // all banks handled by this routine are created from complete slots if let Some(slot_callback) = &opts.slot_callback { diff --git a/ledger/src/shred.rs b/ledger/src/shred.rs index a0e1ffa9c8..2939b3fa46 100644 --- a/ledger/src/shred.rs +++ b/ledger/src/shred.rs @@ -76,6 +76,7 @@ use { solana_pubkey::Pubkey, solana_sha256_hasher::hashv, solana_signature::{Signature, SIGNATURE_BYTES}, + solana_votor_messages::SliceRoot, static_assertions::const_assert_eq, std::fmt::Debug, thiserror::Error, @@ -85,7 +86,7 @@ use {solana_keypair::Keypair, solana_perf::packet::Packet, solana_signer::Signer mod common; pub(crate) mod merkle; -mod merkle_tree; +pub(crate) mod merkle_tree; mod payload; mod shred_code; mod shred_data; @@ -383,11 +384,11 @@ impl Shred { dispatch!(fn set_signature(&mut self, signature: Signature)); dispatch!(fn signed_data(&self) -> Result); - dispatch!(pub fn chained_merkle_root(&self) -> Result); + dispatch!(pub fn chained_merkle_root(&self) -> Result); dispatch!(pub(crate) fn retransmitter_signature(&self) -> Result); dispatch!(pub fn into_payload(self) -> Payload); - dispatch!(pub fn merkle_root(&self) -> Result); + dispatch!(pub fn merkle_root(&self) -> Result); dispatch!(pub fn payload(&self) -> &Payload); dispatch!(pub fn sanitize(&self) -> Result<(), Error>); @@ -916,7 +917,7 @@ mod tests { is_last_in_slot: bool, ) -> Result, Error> { let thread_pool = ThreadPoolBuilder::new().num_threads(2).build().unwrap(); - let chained_merkle_root = chained.then(|| Hash::new_from_array(rng.gen())); + let chained_merkle_root = chained.then(|| SliceRoot(Hash::new_from_array(rng.gen()))); let parent_offset = rng.gen_range(1..=u16::try_from(slot).unwrap_or(u16::MAX)); let parent_slot = slot.checked_sub(u64::from(parent_offset)).unwrap(); let mut data = vec![0u8; data_size]; @@ -1529,7 +1530,7 @@ mod tests { &keypair, &data, false, - Some(Hash::default()), + Some(SliceRoot(Hash::default())), 64, 64, &reed_solomon_cache, diff --git a/ledger/src/shred/merkle.rs b/ledger/src/shred/merkle.rs index af7039f095..b421b94193 100644 --- a/ledger/src/shred/merkle.rs +++ b/ledger/src/shred/merkle.rs @@ -31,6 +31,7 @@ use { solana_sha256_hasher::hashv, solana_signature::Signature, solana_signer::Signer, + solana_votor_messages::SliceRoot, static_assertions::const_assert_eq, std::{ cmp::Ordering, @@ -80,7 +81,7 @@ impl Shred { dispatch!(fn erasure_shard_mut(&mut self) -> Result>, Error>); dispatch!(fn merkle_node(&self) -> Result); dispatch!(fn sanitize(&self) -> Result<(), Error>); - dispatch!(fn set_chained_merkle_root(&mut self, chained_merkle_root: &Hash) -> Result<(), Error>); + dispatch!(fn set_chained_merkle_root(&mut self, chained_merkle_root: &SliceRoot) -> Result<(), Error>); dispatch!(fn set_signature(&mut self, signature: Signature)); dispatch!(fn signed_data(&self) -> Result); dispatch!(pub(super) fn common_header(&self) -> &ShredCommonHeader); 
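
Much of this patch is the mechanical switch from a bare Hash to the SliceRoot newtype for slice (erasure set) merkle roots. A small sketch of the idiom follows; the local Hash and SliceRoot definitions are assumptions standing in for solana_hash::Hash and solana_votor_messages::SliceRoot, and only the wrap/unwrap pattern is the point.

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct Hash([u8; 32]);

impl AsRef<[u8]> for Hash {
    fn as_ref(&self) -> &[u8] {
        &self.0
    }
}

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct SliceRoot(pub Hash);

// Stand-in for keypair.sign_message(..); the real code produces an Ed25519 signature.
fn sign_message(bytes: &[u8]) -> usize {
    bytes.len()
}

fn main() {
    let merkle_root = SliceRoot(Hash([7u8; 32]));
    // Comparisons and storage stay in the newtype, e.g. `chained_merkle_root == merkle_root`
    // in check_chaining or `tree.root != merkle_root` in recover.
    assert_eq!(merkle_root, SliceRoot(Hash([7u8; 32])));
    // Sites that need the raw hash (signing, GPU buffers, signed_data) unwrap with `.0`,
    // as in `keypair.sign_message(root.0.as_ref())` in finish_erasure_batch.
    let _sig = sign_message(merkle_root.0.as_ref());
}
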
@@ -139,8 +140,8 @@ impl Shred { impl Shred { dispatch!(fn erasure_shard(&self) -> Result<&[u8], Error>); dispatch!(fn proof_size(&self) -> Result); - dispatch!(pub(super) fn chained_merkle_root(&self) -> Result); - dispatch!(pub(super) fn merkle_root(&self) -> Result); + dispatch!(pub(super) fn chained_merkle_root(&self) -> Result); + dispatch!(pub(super) fn merkle_root(&self) -> Result); dispatch!(pub(super) fn retransmitter_signature(&self) -> Result); dispatch!(pub(super) fn retransmitter_signature_offset(&self) -> Result); @@ -185,7 +186,7 @@ impl ShredData { proof_size: u8, chained: bool, resigned: bool, - ) -> Option { + ) -> Option { debug_assert_eq!( shred::layout::get_shred_variant(shred).unwrap(), ShredVariant::MerkleData { @@ -207,7 +208,7 @@ impl ShredData { let proof_offset = Self::get_proof_offset(proof_size, chained, resigned).ok()?; let proof = get_merkle_proof(shred, proof_offset, proof_size).ok()?; let node = get_merkle_node(shred, SIZE_OF_SIGNATURE..proof_offset).ok()?; - get_merkle_root(index, node, proof).ok() + get_merkle_root(index, node, proof).map(SliceRoot).ok() } pub(crate) const fn const_capacity( @@ -241,7 +242,7 @@ impl ShredCode { proof_size: u8, chained: bool, resigned: bool, - ) -> Option { + ) -> Option { debug_assert_eq!( shred::layout::get_shred_variant(shred).unwrap(), ShredVariant::MerkleCode { @@ -265,7 +266,7 @@ impl ShredCode { let proof_offset = Self::get_proof_offset(proof_size, chained, resigned).ok()?; let proof = get_merkle_proof(shred, proof_offset, proof_size).ok()?; let node = get_merkle_node(shred, SIZE_OF_SIGNATURE..proof_offset).ok()?; - get_merkle_root(index, node, proof).ok() + get_merkle_root(index, node, proof).map(SliceRoot).ok() } } @@ -347,7 +348,7 @@ macro_rules! impl_merkle_shred { Ok(Self::SIZE_OF_HEADERS + Self::capacity(proof_size, chained, resigned)?) } - pub(super) fn chained_merkle_root(&self) -> Result { + pub(super) fn chained_merkle_root(&self) -> Result { let offset = self.chained_merkle_root_offset()?; self.payload .get(offset..offset + SIZE_OF_MERKLE_ROOT) @@ -357,25 +358,29 @@ macro_rules! 
impl_merkle_shred { .unwrap() }) .ok_or(Error::InvalidPayloadSize(self.payload.len())) + .map(SliceRoot) } - fn set_chained_merkle_root(&mut self, chained_merkle_root: &Hash) -> Result<(), Error> { + fn set_chained_merkle_root( + &mut self, + chained_merkle_root: &SliceRoot, + ) -> Result<(), Error> { let offset = self.chained_merkle_root_offset()?; let Some(mut buffer) = self.payload.get_mut(offset..offset + SIZE_OF_MERKLE_ROOT) else { return Err(Error::InvalidPayloadSize(self.payload.len())); }; - buffer.copy_from_slice(chained_merkle_root.as_ref()); + buffer.copy_from_slice(chained_merkle_root.0.as_ref()); Ok(()) } - pub(super) fn merkle_root(&self) -> Result { + pub(super) fn merkle_root(&self) -> Result { let proof_size = self.proof_size()?; let index = self.erasure_shard_index()?; let proof_offset = self.proof_offset()?; let proof = get_merkle_proof(&self.payload, proof_offset, proof_size)?; let node = get_merkle_node(&self.payload, SIZE_OF_SIGNATURE..proof_offset)?; - get_merkle_root(index, node, proof) + get_merkle_root(index, node, proof).map(SliceRoot) } fn merkle_proof(&self) -> Result, Error> { @@ -553,7 +558,7 @@ impl<'a> ShredTrait<'a> for ShredData { } fn signed_data(&'a self) -> Result { - self.merkle_root() + self.merkle_root().map(|sr| sr.0) } } @@ -609,7 +614,7 @@ impl<'a> ShredTrait<'a> for ShredCode { } fn signed_data(&'a self) -> Result { - self.merkle_root() + self.merkle_root().map(|sr| sr.0) } } @@ -839,18 +844,18 @@ pub(super) fn recover( // The attached signature verifies only if we obtain the same Merkle root. // Because shreds obtained from turbine or repair are sig-verified, this // also means that we don't need to verify signatures for recovered shreds. - if tree.last() != Some(&merkle_root) { + if tree.root != merkle_root { return Err(Error::InvalidMerkleRoot); } let set_merkle_proof = move |(index, (mut shred, mask)): (_, (Shred, _))| { if mask { debug_assert!({ - let proof = make_merkle_proof(index, num_shards, &tree); + let proof = make_merkle_proof(index, num_shards, &tree.hashes); shred.merkle_proof()?.map(Some).eq(proof.map(Result::ok)) }); Ok(None) } else { - let proof = make_merkle_proof(index, num_shards, &tree); + let proof = make_merkle_proof(index, num_shards, &tree.hashes); shred.set_merkle_proof(proof)?; // Already sanitized after reconstruct. debug_assert_matches!(shred.sanitize(), Ok(())); @@ -901,7 +906,7 @@ fn make_stub_shred( erasure_shard_index: usize, common_header: &ShredCommonHeader, coding_header: &CodingShredHeader, - chained_merkle_root: &Option, + chained_merkle_root: &Option, retransmitter_signature: &Option, ) -> Result { let num_data_shreds = usize::from(coding_header.num_data_shreds); @@ -1025,7 +1030,7 @@ pub(crate) fn make_shreds_from_data( thread_pool: &ThreadPool, keypair: &Keypair, // The Merkle root of the previous erasure batch if chained. - chained_merkle_root: Option, + chained_merkle_root: Option, mut data: &[u8], // Serialized &[Entry] slot: Slot, parent_slot: Slot, @@ -1225,9 +1230,9 @@ fn finish_erasure_batch( keypair: &Keypair, shreds: &mut [Shred], // The Merkle root of the previous erasure batch if chained. - chained_merkle_root: Option, + chained_merkle_root: Option, reed_solomon_cache: &ReedSolomonCache, -) -> Result { +) -> Result { debug_assert_eq!(shreds.iter().map(Shred::fec_set_index).dedup().count(), 1); // Write common and {data,coding} headers into shreds' payload. 
fn write_headers(shred: &mut Shred) -> Result<(), bincode::Error> { @@ -1291,11 +1296,11 @@ fn finish_erasure_batch( })), }?; // Sign the root of the Merkle tree. - let root = tree.last().copied().ok_or(Error::InvalidMerkleProof)?; - let signature = keypair.sign_message(root.as_ref()); + let root = tree.root; + let signature = keypair.sign_message(root.0.as_ref()); // Populate merkle proof for all shreds and attach signature. for (index, shred) in shreds.iter_mut().enumerate() { - let proof = make_merkle_proof(index, erasure_batch_size, &tree); + let proof = make_merkle_proof(index, erasure_batch_size, &tree.hashes); shred.set_merkle_proof(proof)?; shred.set_signature(signature); debug_assert!(shred.verify(&keypair.pubkey())); @@ -1413,7 +1418,7 @@ mod test { let tree = make_merkle_tree(nodes.into_iter().map(Ok)).unwrap(); for index in size..size + 3 { assert_matches!( - make_merkle_proof(index, size, &tree).next(), + make_merkle_proof(index, size, &tree.hashes).next(), Some(Err(Error::InvalidMerkleProof)) ); } @@ -1553,7 +1558,7 @@ mod test { let nodes = shreds.iter().map(Shred::merkle_node); let tree = make_merkle_tree(nodes).unwrap(); for (index, shred) in shreds.iter_mut().enumerate() { - let proof = make_merkle_proof(index, num_shreds, &tree); + let proof = make_merkle_proof(index, num_shreds, &tree.hashes); shred.set_merkle_proof(proof).unwrap(); let data = shred.signed_data().unwrap(); let signature = keypair.sign_message(data.as_ref()); @@ -1686,7 +1691,7 @@ mod test { ) { let thread_pool = ThreadPoolBuilder::new().num_threads(2).build().unwrap(); let keypair = Keypair::new(); - let chained_merkle_root = chained.then(|| Hash::new_from_array(rng.gen())); + let chained_merkle_root = chained.then(|| SliceRoot(Hash::new_from_array(rng.gen()))); let resigned = chained && is_last_in_slot; let slot = 149_745_689; let parent_slot = slot - rng.gen_range(1..65536); @@ -1750,7 +1755,7 @@ mod test { assert_matches!(shred.chained_merkle_root(), Err(Error::InvalidShredVariant)); None }; - assert!(signature.verify(pubkey.as_ref(), merkle_root.as_ref())); + assert!(signature.verify(pubkey.as_ref(), merkle_root.0.as_ref())); // Verify shred::layout api. let shred = shred.payload(); assert_eq!(shred::layout::get_signature(shred), Some(signature)); @@ -1769,7 +1774,7 @@ mod test { chained_merkle_root ); let data = shred::layout::get_signed_data(shred).unwrap(); - assert_eq!(data, merkle_root); + assert_eq!(data, merkle_root.0); assert!(signature.verify(pubkey.as_ref(), data.as_ref())); } // Verify common, data and coding headers. @@ -1842,7 +1847,7 @@ mod test { assert!(num_coding_shreds >= num_data_shreds); // Verify chained Merkle roots. 
if let Some(chained_merkle_root) = chained_merkle_root { - let chained_merkle_roots: HashMap = + let chained_merkle_roots: HashMap = std::iter::once((0, chained_merkle_root)) .chain( shreds diff --git a/ledger/src/shred/merkle_tree.rs b/ledger/src/shred/merkle_tree.rs index d9dcd46313..b650614ff7 100644 --- a/ledger/src/shred/merkle_tree.rs +++ b/ledger/src/shred/merkle_tree.rs @@ -1,6 +1,9 @@ use { - crate::shred::Error, solana_hash::Hash, solana_sha256_hasher::hashv, - static_assertions::const_assert_eq, std::iter::successors, + crate::shred::Error, + solana_hash::Hash, + solana_sha256_hasher::hashv, + solana_votor_messages::{AlpenglowBlockId, SliceRoot}, + static_assertions::const_assert_eq, }; pub(crate) const SIZE_OF_MERKLE_ROOT: usize = std::mem::size_of::(); @@ -18,58 +21,105 @@ pub(crate) const MERKLE_HASH_PREFIX_LEAF: &[u8] = b"\x00SOLANA_MERKLE_SHREDS_LEA pub(crate) const MERKLE_HASH_PREFIX_NODE: &[u8] = b"\x01SOLANA_MERKLE_SHREDS_NODE"; pub(crate) type MerkleProofEntry = [u8; 20]; +pub(crate) struct SliceMerkleTree { + pub hashes: Vec>, + pub root: SliceRoot, +} + +pub(crate) struct DoubleMerkleTree { + pub _hashes: Vec>, + pub _parent_id: AlpenglowBlockId, + pub block_id: AlpenglowBlockId, +} -pub fn make_merkle_tree(shreds: I) -> Result, Error> +fn make_basic_merkle_tree(items: I) -> Result>, Error> where I: IntoIterator>, ::IntoIter: ExactSizeIterator, { - let shreds = shreds.into_iter(); - let num_shreds = shreds.len(); - let capacity = get_merkle_tree_size(num_shreds); - let mut nodes = Vec::with_capacity(capacity); - for shred in shreds { - nodes.push(shred?); + let items = items.into_iter(); + let num_items = items.len(); + let non_leaf_num = get_non_leaf_num(num_items); + let mut nodes = vec![None; num_items + non_leaf_num]; + let mut leaf_index = non_leaf_num; + for item in items { + nodes[leaf_index] = Some(item?); + leaf_index += 1; } - let init = (num_shreds > 1).then_some(num_shreds); - for size in successors(init, |&k| (k > 2).then_some((k + 1) >> 1)) { - let offset = nodes.len() - size; - for index in (offset..offset + size).step_by(2) { - let node = &nodes[index]; - let other = &nodes[(index + 1).min(offset + size - 1)]; - let parent = join_nodes(node, other); - nodes.push(parent); + for i in (1..=(non_leaf_num - 1)).rev() { + if let Some(Some(h1)) = nodes.get(2 * i) { + if let Some(Some(h2)) = nodes.get(2 * i + 1) { + nodes[i] = Some(join_nodes(h1, h2)); + } else { + nodes[i] = Some(join_nodes(h1, h1)); + } + } else { + nodes[i] = None; } } - debug_assert_eq!(nodes.len(), capacity); Ok(nodes) } +// The tree for a slice (erasure set). +pub fn make_merkle_tree(shreds: I) -> Result +where + I: IntoIterator>, + ::IntoIter: ExactSizeIterator, +{ + let nodes = make_basic_merkle_tree(shreds)?; + let root = nodes[1].unwrap(); + Ok(SliceMerkleTree { + hashes: nodes, + root: SliceRoot(root), + }) +} + +// The tree on top of slice trees for Alpenglow block id/repair. +pub fn make_double_merkle_tree( + slice_roots: I, + parent_id: AlpenglowBlockId, +) -> Result +where + I: IntoIterator, + ::IntoIter: ExactSizeIterator, +{ + let hashes: Vec<_> = slice_roots + .into_iter() + .map(|slice_root| Ok(slice_root.0)) + .chain(std::iter::once(Ok(parent_id.0))) + .collect(); + let nodes = make_basic_merkle_tree(hashes)?; + let root = nodes[1].unwrap(); + Ok(DoubleMerkleTree { + _hashes: nodes, + _parent_id: parent_id, + block_id: AlpenglowBlockId(root), + }) +} + pub fn make_merkle_proof( - mut index: usize, // leaf index ~ shred's erasure shard index. 
- mut size: usize, // number of leaves ~ erasure batch size. - tree: &[Hash], + mut index: usize, // leaf index + mut size: usize, // number of leaves in the tree + tree: &[Option], ) -> impl Iterator> { - let mut offset = 0; - if index >= size { - // Force below iterator to return Error. - (size, offset) = (0, tree.len()); - } + let non_leaf_num = get_non_leaf_num(size); + size += non_leaf_num; + index += non_leaf_num; + std::iter::from_fn(move || { - if size > 1 { - let Some(node) = tree.get(offset + (index ^ 1).min(size - 1)) else { - return Some(Err(Error::InvalidMerkleProof)); + if index == 1 { + None + } else if index >= size { + Some(Err(Error::InvalidMerkleProof)) + } else { + let node = match tree.get(index ^ 1) { + Some(Some(hash)) => hash, + _ => tree[index].as_ref().expect("node must have a hash"), }; - offset += size; - size = (size + 1) >> 1; index >>= 1; - let entry = &node.as_ref()[..SIZE_OF_MERKLE_PROOF_ENTRY]; - let entry = <&MerkleProofEntry>::try_from(entry).unwrap(); + let entry_bytes = &node.as_ref()[..SIZE_OF_MERKLE_PROOF_ENTRY]; + let entry = <&MerkleProofEntry>::try_from(entry_bytes).unwrap(); Some(Ok(entry)) - } else if offset + 1 == tree.len() { - None - } else { - Some(Err(Error::InvalidMerkleProof)) } }) } @@ -102,9 +152,13 @@ where .ok_or(Error::InvalidMerkleProof) } -// Given number of shreds, returns the number of nodes in the Merkle tree. -pub fn get_merkle_tree_size(num_shreds: usize) -> usize { - successors(Some(num_shreds), |&k| (k > 1).then_some((k + 1) >> 1)).sum() +// Given number of items, returns the number of non-leaf nodes in the Merkle tree. +pub fn get_non_leaf_num(leaf_num: usize) -> usize { + let mut non_leaf_num = 1; + while non_leaf_num < leaf_num { + non_leaf_num *= 2; + } + non_leaf_num } // Maps number of (code + data) shreds to merkle_proof.len(). 
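
The rewritten tree above replaces the old flat layout (leaves first, root last) with a 1-indexed array layout: with n leaves, internal nodes occupy indices 1..P and leaves start at index P, where P = get_non_leaf_num(n) is the smallest power of two >= n, so a node's sibling is i ^ 1 and its parent is i >> 1. A minimal self-contained sketch of that layout follows; the helper names and u64 stand-in hashes are assumptions, while the real code hashes with SHA-256 and the MERKLE_HASH_PREFIX_* domain separators.

fn non_leaf_num(leaf_num: usize) -> usize {
    // Smallest power of two >= leaf_num, as in get_non_leaf_num above.
    let mut p = 1;
    while p < leaf_num {
        p *= 2;
    }
    p
}

fn join(a: u64, b: u64) -> u64 {
    // Toy combiner standing in for hashv(MERKLE_HASH_PREFIX_NODE, ..).
    a.rotate_left(7) ^ b
}

fn build_tree(leaves: &[u64]) -> Vec<Option<u64>> {
    let p = non_leaf_num(leaves.len());
    let mut nodes = vec![None; p + leaves.len()];
    for (i, &leaf) in leaves.iter().enumerate() {
        nodes[p + i] = Some(leaf);
    }
    // Fill internal nodes bottom-up; a node whose right child is missing is joined with itself.
    for i in (1..p).rev() {
        nodes[i] = match (
            nodes.get(2 * i).copied().flatten(),
            nodes.get(2 * i + 1).copied().flatten(),
        ) {
            (Some(l), Some(r)) => Some(join(l, r)),
            (Some(l), None) => Some(join(l, l)),
            _ => None,
        };
    }
    nodes
}

fn main() {
    // With 5 leaves, P = 8: leaves sit at indices 8..=12 and the root at index 1.
    let tree = build_tree(&[10u64, 20, 30, 40, 50]);
    println!("root: {:?}", tree[1]);
    // Walking a proof for the third leaf (array index 10): emit the sibling, or the node
    // itself when the sibling slot is empty, then step to the parent until the root.
    let mut index = 10usize;
    while index > 1 {
        let entry = tree.get(index ^ 1).copied().flatten().or(tree[index]);
        println!("proof entry from index {}: {:?}", index ^ 1, entry);
        index >>= 1;
    }
}
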
@@ -137,14 +191,6 @@ mod tests { assert_eq!(entry, &bytes[..SIZE_OF_MERKLE_PROOF_ENTRY]); } - #[test] - fn test_get_merkle_tree_size() { - const TREE_SIZE: [usize; 15] = [0, 1, 3, 6, 7, 11, 12, 14, 15, 20, 21, 23, 24, 27, 28]; - for (num_shreds, size) in TREE_SIZE.into_iter().enumerate() { - assert_eq!(get_merkle_tree_size(num_shreds), size); - } - } - #[test] fn test_make_merkle_proof_error() { let mut rng = rand::thread_rng(); @@ -154,7 +200,7 @@ mod tests { let tree = make_merkle_tree(nodes.into_iter().map(Ok)).unwrap(); for index in size..size + 3 { assert_matches!( - make_merkle_proof(index, size, &tree).next(), + make_merkle_proof(index, size, &tree.hashes).next(), Some(Err(Error::InvalidMerkleProof)) ); } @@ -164,10 +210,10 @@ mod tests { let nodes = repeat_with(|| rng.gen::<[u8; 32]>()).map(Hash::from); let nodes: Vec<_> = nodes.take(size).collect(); let tree = make_merkle_tree(nodes.iter().cloned().map(Ok)).unwrap(); - let root = tree.last().copied().unwrap(); + let root = tree.hashes[1].unwrap(); for index in 0..size { for (k, &node) in nodes.iter().enumerate() { - let proof = make_merkle_proof(index, size, &tree).map(Result::unwrap); + let proof = make_merkle_proof(index, size, &tree.hashes).map(Result::unwrap); if k == index { assert_eq!(root, get_merkle_root(k, node, proof).unwrap()); } else { diff --git a/ledger/src/shred/shred_code.rs b/ledger/src/shred/shred_code.rs index 9123b37dae..63481913fe 100644 --- a/ledger/src/shred/shred_code.rs +++ b/ledger/src/shred/shred_code.rs @@ -10,6 +10,7 @@ use { solana_hash::Hash, solana_packet::PACKET_DATA_SIZE, solana_signature::Signature, + solana_votor_messages::SliceRoot, static_assertions::const_assert_eq, }; @@ -38,13 +39,13 @@ impl ShredCode { shred.signed_data() } - pub(super) fn chained_merkle_root(&self) -> Result { + pub(super) fn chained_merkle_root(&self) -> Result { match self { Self::Merkle(shred) => shred.chained_merkle_root(), } } - pub(super) fn merkle_root(&self) -> Result { + pub(super) fn merkle_root(&self) -> Result { match self { Self::Merkle(shred) => shred.merkle_root(), } diff --git a/ledger/src/shred/shred_data.rs b/ledger/src/shred/shred_data.rs index fae7990a77..54a817d989 100644 --- a/ledger/src/shred/shred_data.rs +++ b/ledger/src/shred/shred_data.rs @@ -11,6 +11,7 @@ use { solana_clock::Slot, solana_hash::Hash, solana_signature::Signature, + solana_votor_messages::SliceRoot, }; #[derive(Clone, Debug, Eq, PartialEq)] @@ -34,13 +35,13 @@ impl ShredData { shred.signed_data() } - pub(super) fn chained_merkle_root(&self) -> Result { + pub(super) fn chained_merkle_root(&self) -> Result { match self { Self::Merkle(shred) => shred.chained_merkle_root(), } } - pub(super) fn merkle_root(&self) -> Result { + pub(super) fn merkle_root(&self) -> Result { match self { Self::Merkle(shred) => shred.merkle_root(), } diff --git a/ledger/src/shred/wire.rs b/ledger/src/shred/wire.rs index f9109ea2e4..cbf0d6b66f 100644 --- a/ledger/src/shred/wire.rs +++ b/ledger/src/shred/wire.rs @@ -12,6 +12,7 @@ use { solana_perf::packet::{PacketRef, PacketRefMut}, solana_signature::{Signature, SIGNATURE_BYTES}, solana_signer::Signer, + solana_votor_messages::SliceRoot, std::ops::Range, }; #[cfg(test)] @@ -185,7 +186,7 @@ pub(crate) fn get_signed_data(shred: &[u8]) -> Option { resigned, } => shred::merkle::ShredData::get_merkle_root(shred, proof_size, chained, resigned)?, }; - Some(data) + Some(data.0) } pub fn get_reference_tick(shred: &[u8]) -> Result { @@ -198,7 +199,7 @@ pub fn get_reference_tick(shred: &[u8]) -> Result { Ok(flags & 
ShredFlags::SHRED_TICK_REFERENCE_MASK.bits()) } -pub fn get_merkle_root(shred: &[u8]) -> Option { +pub fn get_merkle_root(shred: &[u8]) -> Option { match get_shred_variant(shred).ok()? { ShredVariant::MerkleCode { proof_size, @@ -213,7 +214,7 @@ pub fn get_merkle_root(shred: &[u8]) -> Option { } } -pub(crate) fn get_chained_merkle_root(shred: &[u8]) -> Option { +pub(crate) fn get_chained_merkle_root(shred: &[u8]) -> Option { let offset = match get_shred_variant(shred).ok()? { ShredVariant::MerkleCode { proof_size, @@ -232,9 +233,9 @@ pub(crate) fn get_chained_merkle_root(shred: &[u8]) -> Option { } .ok()?; let merkle_root = shred.get(offset..offset + SIZE_OF_MERKLE_ROOT)?; - Some(Hash::from( + Some(SliceRoot(Hash::from( <[u8; SIZE_OF_MERKLE_ROOT]>::try_from(merkle_root).unwrap(), - )) + ))) } fn get_retransmitter_signature_offset(shred: &[u8]) -> Result { @@ -347,7 +348,7 @@ pub fn resign_shred(shred: &mut [u8], keypair: &Keypair) -> Result<(), Error> { let Some(buffer) = shred.get_mut(offset..offset + SIGNATURE_BYTES) else { return Err(Error::InvalidPayloadSize(shred.len())); }; - let signature = keypair.sign_message(merkle_root.as_ref()); + let signature = keypair.sign_message(merkle_root.0.as_ref()); buffer.copy_from_slice(signature.as_ref()); Ok(()) } @@ -555,7 +556,7 @@ mod tests { }); assert_eq!( get_signed_data(bytes).unwrap(), - shred.merkle_root().unwrap() + shred.merkle_root().unwrap().0 ); assert_eq!( get_merkle_root(bytes).unwrap(), @@ -593,7 +594,7 @@ mod tests { { let mut bytes = bytes.to_vec(); let keypair = Keypair::new(); - let signature = keypair.sign_message(shred.merkle_root().unwrap().as_ref()); + let signature = keypair.sign_message(shred.merkle_root().unwrap().0.as_ref()); assert_matches!(resign_shred(&mut bytes, &keypair), Ok(())); assert_eq!(get_retransmitter_signature(&bytes).unwrap(), signature); let shred = shred::merkle::Shred::from_payload(bytes).unwrap(); diff --git a/ledger/src/shredder.rs b/ledger/src/shredder.rs index 036a5127c7..f9c03f627a 100644 --- a/ledger/src/shredder.rs +++ b/ledger/src/shredder.rs @@ -10,6 +10,7 @@ use { solana_hash::Hash, solana_keypair::Keypair, solana_rayon_threadlimit::get_thread_count, + solana_votor_messages::SliceRoot, std::{ fmt::Debug, sync::{Arc, OnceLock, RwLock}, @@ -69,7 +70,7 @@ impl Shredder { keypair: &Keypair, entries: &[Entry], is_last_in_slot: bool, - chained_merkle_root: Option, + chained_merkle_root: Option, next_shred_index: u32, next_code_index: u32, reed_solomon_cache: &ReedSolomonCache, @@ -98,7 +99,7 @@ impl Shredder { keypair: &Keypair, data: &[u8], is_last_in_slot: bool, - chained_merkle_root: Option, + chained_merkle_root: Option, next_shred_index: u32, next_code_index: u32, reed_solomon_cache: &ReedSolomonCache, @@ -128,7 +129,7 @@ impl Shredder { keypair: &Keypair, entries: &[Entry], is_last_in_slot: bool, - chained_merkle_root: Option, + chained_merkle_root: Option, next_shred_index: u32, next_code_index: u32, reed_solomon_cache: &ReedSolomonCache, @@ -204,7 +205,7 @@ impl Shredder { keypair, &[], true, - Some(Hash::default()), + Some(SliceRoot(Hash::default())), 0, 0, &reed_solomon_cache, @@ -308,9 +309,9 @@ mod tests { &keypair, &entries, is_last_in_slot, - Some(Hash::new_from_array(rand::thread_rng().gen())), // chained_merkle_root - start_index, // next_shred_index - start_index, // next_code_index + Some(SliceRoot(Hash::new_from_array(rand::thread_rng().gen()))), // chained_merkle_root + start_index, // next_shred_index + start_index, // next_code_index &ReedSolomonCache::default(), &mut 
ProcessShredsStats::default(), ); @@ -394,9 +395,9 @@ mod tests { &keypair, &entries, is_last_in_slot, - Some(Hash::new_from_array(rand::thread_rng().gen())), // chained_merkle_root - 369, // next_shred_index - 776, // next_code_index + Some(SliceRoot(Hash::new_from_array(rand::thread_rng().gen()))), // chained_merkle_root + 369, // next_shred_index + 776, // next_code_index &ReedSolomonCache::default(), &mut ProcessShredsStats::default(), ); @@ -426,9 +427,9 @@ mod tests { &keypair, &entries, is_last_in_slot, - Some(Hash::new_from_array(rand::thread_rng().gen())), // chained_merkle_root - 0, // next_shred_index - 0, // next_code_index + Some(SliceRoot(Hash::new_from_array(rand::thread_rng().gen()))), // chained_merkle_root + 0, // next_shred_index + 0, // next_code_index &ReedSolomonCache::default(), &mut ProcessShredsStats::default(), ); @@ -463,9 +464,9 @@ mod tests { &keypair, &entries, is_last_in_slot, - Some(Hash::new_from_array(rand::thread_rng().gen())), // chained_merkle_root - 0, // next_shred_index - 0, // next_code_index + Some(SliceRoot(Hash::new_from_array(rand::thread_rng().gen()))), // chained_merkle_root + 0, // next_shred_index + 0, // next_code_index &ReedSolomonCache::default(), &mut ProcessShredsStats::default(), ); @@ -510,9 +511,9 @@ mod tests { &keypair, &entries, is_last_in_slot, - Some(Hash::new_from_array(rand::thread_rng().gen())), // chained_merkle_root - 0, // next_shred_index - 0, // next_code_index + Some(SliceRoot(Hash::new_from_array(rand::thread_rng().gen()))), // chained_merkle_root + 0, // next_shred_index + 0, // next_code_index &ReedSolomonCache::default(), &mut ProcessShredsStats::default(), ); @@ -560,9 +561,9 @@ mod tests { &keypair, &entries, is_last_in_slot, - Some(Hash::new_from_array(rand::thread_rng().gen())), // chained_merkle_root - 0, // next_shred_index - 0, // next_code_index + Some(SliceRoot(Hash::new_from_array(rand::thread_rng().gen()))), // chained_merkle_root + 0, // next_shred_index + 0, // next_code_index &ReedSolomonCache::default(), &mut ProcessShredsStats::default(), ); @@ -594,9 +595,9 @@ mod tests { &keypair, &entries, is_last_in_slot, - Some(Hash::new_from_array(rand::thread_rng().gen())), // chained_merkle_root - start_index, // next_shred_index - start_index, // next_code_index + Some(SliceRoot(Hash::new_from_array(rand::thread_rng().gen()))), // chained_merkle_root + start_index, // next_shred_index + start_index, // next_code_index &ReedSolomonCache::default(), &mut ProcessShredsStats::default(), ); diff --git a/ledger/src/sigverify_shreds.rs b/ledger/src/sigverify_shreds.rs index 39690dc37b..086c90055d 100644 --- a/ledger/src/sigverify_shreds.rs +++ b/ledger/src/sigverify_shreds.rs @@ -31,13 +31,14 @@ use { solana_keypair::Keypair, solana_perf::packet::PacketRefMut, solana_signer::Signer, + solana_votor_messages::SliceRoot, std::sync::Arc, }; #[cfg(test)] const SIGN_SHRED_GPU_MIN: usize = 256; -pub type LruCache = lazy_lru::LruCache<(Signature, Pubkey, /*merkle root:*/ Hash), ()>; +pub type LruCache = lazy_lru::LruCache<(Signature, Pubkey, Hash), ()>; pub type SlotPubkeys = HashMap>; @@ -161,7 +162,7 @@ fn slot_key_data_for_gpu( } // Recovers merkle roots from shreds binary. 
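
A minimal sketch of the convention visible throughout the hunks above (resign_shred, get_merkle_root_hashes): signatures are still produced and checked over the raw 32-byte Merkle root, so every signing or verification site unwraps the SliceRoot newtype via `.0`. The helper names below are illustrative only; the crate paths and methods are the ones this diff already uses.

use {
    solana_keypair::Keypair,
    solana_pubkey::Pubkey,
    solana_signature::Signature,
    solana_signer::Signer,
    solana_votor_messages::SliceRoot,
};

// Sign the slice's Merkle root: the message is the inner Hash, as in resign_shred above.
fn sign_slice_root(keypair: &Keypair, root: &SliceRoot) -> Signature {
    keypair.sign_message(root.0.as_ref())
}

// Verify the signature against the same 32 bytes.
fn verify_slice_root(pubkey: &Pubkey, root: &SliceRoot, signature: &Signature) -> bool {
    signature.verify(pubkey.as_ref(), root.0.as_ref())
}
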
-fn get_merkle_roots( +fn get_merkle_root_hashes( thread_pool: &ThreadPool, packets: &[PacketBatch], recycler_cache: &RecyclerCache, @@ -178,7 +179,7 @@ fn get_merkle_roots( return None; } let shred = shred::layout::get_shred(packet)?; - shred::layout::get_merkle_root(shred) + shred::layout::get_merkle_root(shred).map(|root| root.0) }) }) .collect() @@ -281,7 +282,7 @@ pub fn verify_shreds_gpu( //HACK: Pubkeys vector is passed along as a `PacketBatch` buffer to the GPU //TODO: GPU needs a more opaque interface, which can handle variable sized structures for data let (merkle_roots, merkle_roots_offsets) = - get_merkle_roots(thread_pool, batches, recycler_cache); + get_merkle_root_hashes(thread_pool, batches, recycler_cache); // Merkle roots are placed after pubkeys; adjust offsets accordingly. let merkle_roots_offsets = { let shift = pubkeys.len(); @@ -430,7 +431,7 @@ fn sign_shreds_gpu( secret_offsets.resize(packet_count, pubkey_size as u32); let (merkle_roots, merkle_roots_offsets) = - get_merkle_roots(thread_pool, batches, recycler_cache); + get_merkle_root_hashes(thread_pool, batches, recycler_cache); // Merkle roots are placed after the keypair; adjust offsets accordingly. let merkle_roots_offsets = { let shift = pinned_keypair.len(); @@ -556,7 +557,7 @@ mod tests { &keypair, &[], true, - Some(Hash::default()), + Some(SliceRoot(Hash::default())), 0, 0, &reed_solomon_cache, @@ -689,7 +690,7 @@ mod tests { keypair, &[], true, - Some(Hash::default()), + Some(SliceRoot(Hash::default())), 0, 0, &reed_solomon_cache, @@ -762,7 +763,7 @@ mod tests { &make_entries(rng, num_entries), is_last_in_slot, // chained_merkle_root - chained.then(|| Hash::new_from_array(rng.gen())), + chained.then(|| SliceRoot(Hash::new_from_array(rng.gen()))), rng.gen_range(0..2671), // next_shred_index rng.gen_range(0..2781), // next_code_index &reed_solomon_cache, diff --git a/ledger/src/wire_format_tests.rs b/ledger/src/wire_format_tests.rs index c78469ec76..544bc53054 100644 --- a/ledger/src/wire_format_tests.rs +++ b/ledger/src/wire_format_tests.rs @@ -47,8 +47,8 @@ mod tests { ); println!( "Shred merkle root {:X?}, chained root {:X?}, rtx_sign {:X?}", - merkle_root.map(|v| v.as_ref().to_vec()), - chained_merkle_root.map(|v| v.as_ref().to_vec()), + merkle_root.map(|v| v.0.as_ref().to_vec()), + chained_merkle_root.map(|v| v.0.as_ref().to_vec()), rtx_sign.map(|v| v.as_ref().to_vec()) ); println!( diff --git a/ledger/tests/shred.rs b/ledger/tests/shred.rs index ce7851adb7..f435176063 100644 --- a/ledger/tests/shred.rs +++ b/ledger/tests/shred.rs @@ -10,6 +10,7 @@ use { }, solana_signer::Signer, solana_system_transaction as system_transaction, + solana_votor_messages::SliceRoot, std::{ collections::{BTreeMap, HashSet}, convert::TryInto, @@ -32,7 +33,7 @@ fn test_multi_fec_block_coding(is_last_in_slot: bool) { let keypair1 = Keypair::new(); let tx0 = system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default()); let entry = Entry::new(&Hash::default(), 1, vec![tx0]); - let chained_merkle_root = Some(Hash::default()); + let chained_merkle_root = Some(SliceRoot(Hash::default())); let merkle_capacity = ShredData::capacity(Some((6, true, is_last_in_slot))).unwrap(); let num_entries = max_entries_per_n_shred(&entry, num_data_shreds as u64, Some(merkle_capacity)); @@ -202,7 +203,7 @@ fn setup_different_sized_fec_blocks( let tx0 = system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default()); let entry = Entry::new(&Hash::default(), 1, vec![tx0]); let merkle_capacity = 
ShredData::capacity(Some((6, true, true))).unwrap(); - let chained_merkle_root = Some(Hash::default()); + let chained_merkle_root = Some(SliceRoot(Hash::default())); assert!(DATA_SHREDS_PER_FEC_BLOCK > 2); let num_shreds_per_iter = DATA_SHREDS_PER_FEC_BLOCK; diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs index 7cf3d5f527..9c3f24c145 100644 --- a/local-cluster/tests/local_cluster.rs +++ b/local-cluster/tests/local_cluster.rs @@ -103,6 +103,7 @@ use { CertificateType, ConsensusMessage, VoteMessage, BLS_KEYPAIR_DERIVE_SEED, }, vote::Vote, + AlpenglowBlockId, }, std::{ collections::{BTreeSet, HashMap, HashSet}, @@ -6572,7 +6573,7 @@ fn test_alpenglow_ensure_liveness_after_double_notar_fallback() { struct VoteListenerState { num_notar_fallback_votes: u32, a_equivocates: bool, - notar_fallback_map: HashMap>, + notar_fallback_map: HashMap>, double_notar_fallback_slots: Vec, check_for_roots: bool, post_experiment_votes: HashMap>, @@ -6617,7 +6618,7 @@ fn test_alpenglow_ensure_liveness_after_double_notar_fallback() { // Create vote for Node B (potentially equivocated) let vote = &vote_message.vote; let vote_b = if self.a_equivocates && vote.is_notarization() { - let new_block_id = Hash::new_unique(); + let new_block_id = AlpenglowBlockId(Hash::new_unique()); Vote::new_notarization_vote(vote.slot(), new_block_id) } else { *vote diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 75f1b729ca..5d52ff1f79 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -160,6 +160,7 @@ use { solana_transaction_context::{transaction_accounts::TransactionAccount, TransactionReturnData}, solana_transaction_error::{TransactionError, TransactionResult as Result}, solana_vote::vote_account::{VoteAccount, VoteAccountsHashMap}, + solana_votor_messages::{AlpenglowBlockId, SliceRoot}, std::{ collections::{HashMap, HashSet}, fmt, @@ -550,7 +551,8 @@ impl PartialEq for Bank { fee_structure: _, cache_for_accounts_lt_hash: _, stats_for_accounts_lt_hash: _, - block_id, + chained_merkle_id, + alpenglow_block_id, bank_hash_stats: _, epoch_rewards_calculation_cache: _, // Ignore new fields explicitly if they do not impact PartialEq. @@ -590,7 +592,8 @@ impl PartialEq for Bank { && (Arc::ptr_eq(hash_overrides, &other.hash_overrides) || *hash_overrides.lock().unwrap() == *other.hash_overrides.lock().unwrap()) && *accounts_lt_hash.lock().unwrap() == *other.accounts_lt_hash.lock().unwrap() - && *block_id.read().unwrap() == *other.block_id.read().unwrap() + && *chained_merkle_id.read().unwrap() == *other.chained_merkle_id.read().unwrap() + && *alpenglow_block_id.read().unwrap() == *other.alpenglow_block_id.read().unwrap() } } @@ -888,10 +891,13 @@ pub struct Bank { /// Stats related to the accounts lt hash stats_for_accounts_lt_hash: AccountsLtHashStats, - /// The unique identifier for the corresponding block for this bank. - /// None for banks that have not yet completed replay or for leader banks as we cannot populate block_id + /// Chained Merkle root (to be removed when we have time) + /// None for banks that have not yet completed replay or for leader banks as we cannot populate the field /// until bankless leader. Can be computed directly from shreds without needing to execute transactions. - block_id: RwLock>, + chained_merkle_id: RwLock>, + + /// Block ID, used for voting, block repair, and other consensus logic. 
+ alpenglow_block_id: RwLock>, /// Accounts stats for computing the bank hash bank_hash_stats: AtomicBankHashStats, @@ -1093,7 +1099,8 @@ impl Bank { accounts_lt_hash: Mutex::new(AccountsLtHash(LtHash::identity())), cache_for_accounts_lt_hash: DashMap::default(), stats_for_accounts_lt_hash: AccountsLtHashStats::default(), - block_id: RwLock::new(None), + chained_merkle_id: RwLock::new(None), + alpenglow_block_id: RwLock::new(None), bank_hash_stats: AtomicBankHashStats::default(), epoch_rewards_calculation_cache: Arc::new(Mutex::new(HashMap::default())), }; @@ -1340,7 +1347,8 @@ impl Bank { accounts_lt_hash: Mutex::new(parent.accounts_lt_hash.lock().unwrap().clone()), cache_for_accounts_lt_hash: DashMap::default(), stats_for_accounts_lt_hash: AccountsLtHashStats::default(), - block_id: RwLock::new(None), + chained_merkle_id: RwLock::new(None), + alpenglow_block_id: RwLock::new(None), bank_hash_stats: AtomicBankHashStats::default(), epoch_rewards_calculation_cache: parent.epoch_rewards_calculation_cache.clone(), }; @@ -1808,7 +1816,8 @@ impl Bank { accounts_lt_hash: Mutex::new(fields.accounts_lt_hash), cache_for_accounts_lt_hash: DashMap::default(), stats_for_accounts_lt_hash: AccountsLtHashStats::default(), - block_id: RwLock::new(None), + chained_merkle_id: RwLock::new(None), + alpenglow_block_id: RwLock::new(None), bank_hash_stats: AtomicBankHashStats::new(&fields.bank_hash_stats), epoch_rewards_calculation_cache: Arc::new(Mutex::new(HashMap::default())), }; @@ -5560,16 +5569,28 @@ impl Bank { &self.fee_structure } - pub fn parent_block_id(&self) -> Option { - self.parent().and_then(|p| p.block_id()) + pub fn parent_chained_merkle_id(&self) -> Option { + self.parent().and_then(|p| p.chained_merkle_id()) + } + + pub fn parent_alpenglow_block_id(&self) -> Option { + self.parent().and_then(|p| p.alpenglow_block_id()) + } + + pub fn chained_merkle_id(&self) -> Option { + *self.chained_merkle_id.read().unwrap() + } + + pub fn set_chained_merkle_id(&self, chained_merkle_id: Option) { + *self.chained_merkle_id.write().unwrap() = chained_merkle_id; } - pub fn block_id(&self) -> Option { - *self.block_id.read().unwrap() + pub fn alpenglow_block_id(&self) -> Option { + *self.alpenglow_block_id.read().unwrap() } - pub fn set_block_id(&self, block_id: Option) { - *self.block_id.write().unwrap() = block_id; + pub fn set_alpenglow_block_id(&self, alpenglow_block_id: Option) { + *self.alpenglow_block_id.write().unwrap() = alpenglow_block_id; } pub fn compute_budget(&self) -> Option { diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 73a6f471f6..47b471fcba 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -12440,15 +12440,32 @@ fn test_startup_from_snapshot_after_precompile_transition() { } #[test] -fn test_parent_block_id() { +fn test_parent_alpenglow_block_id() { // Setup parent bank and populate block ID. let (genesis_config, _mint_keypair) = create_genesis_config(100_000); let parent_bank = Arc::new(Bank::new_for_tests(&genesis_config)); - let parent_block_id = Some(Hash::new_unique()); - parent_bank.set_block_id(parent_block_id); + let parent_alpenglow_block_id = Some(AlpenglowBlockId(Hash::new_unique())); + parent_bank.set_alpenglow_block_id(parent_alpenglow_block_id); // Create child from parent and ensure parent block ID links back to the // expected value. 
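
A short usage sketch of the split accessors introduced above, assuming an already constructed Bank: the legacy chained Merkle root and the Alpenglow block id are now tracked and set independently. Method names and types come straight from this diff; the helper function itself is hypothetical.

use {
    solana_runtime::bank::Bank,
    solana_votor_messages::{AlpenglowBlockId, SliceRoot},
};

// Record both identifiers on a replayed bank and read the block id back.
fn record_block_ids(bank: &Bank, slice_root: SliceRoot, block_id: AlpenglowBlockId) {
    bank.set_chained_merkle_id(Some(slice_root));
    bank.set_alpenglow_block_id(Some(block_id));
    assert_eq!(bank.alpenglow_block_id(), Some(block_id));
    // A child bank created later can link back through parent_alpenglow_block_id().
}
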
let child_bank = Bank::new_from_parent(parent_bank, &Pubkey::new_unique(), 1); - assert_eq!(parent_block_id, child_bank.parent_block_id()); + assert_eq!( + parent_alpenglow_block_id, + child_bank.parent_alpenglow_block_id() + ); +} + +#[test] +fn test_parent_chained_merkle_id() { + // Setup parent bank and populate chained merkle. + let (genesis_config, _mint_keypair) = create_genesis_config(100_000); + let parent_bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let parent_chained_merkle = Some(SliceRoot(Hash::new_unique())); + parent_bank.set_chained_merkle_id(parent_chained_merkle); + + // Create child from parent and ensure parent chained merkle links back to the + // expected value. + let child_bank = Bank::new_from_parent(parent_bank, &Pubkey::new_unique(), 1); + assert_eq!(parent_chained_merkle, child_bank.parent_chained_merkle_id()); } diff --git a/turbine/Cargo.toml b/turbine/Cargo.toml index 50292b5b58..522247ee06 100644 --- a/turbine/Cargo.toml +++ b/turbine/Cargo.toml @@ -56,6 +56,7 @@ solana-time-utils = { workspace = true } solana-tls-utils = { workspace = true } solana-transaction-error = { workspace = true } solana-votor = { workspace = true } +solana-votor-messages = { workspace = true } static_assertions = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true } diff --git a/turbine/benches/cluster_nodes.rs b/turbine/benches/cluster_nodes.rs index a34fd22718..d1bfaac3a0 100644 --- a/turbine/benches/cluster_nodes.rs +++ b/turbine/benches/cluster_nodes.rs @@ -13,6 +13,7 @@ use { cluster_nodes::{make_test_cluster, new_cluster_nodes, ClusterNodes}, retransmit_stage::RetransmitStage, }, + solana_votor_messages::SliceRoot, }; fn make_cluster_nodes( @@ -32,7 +33,7 @@ fn get_retransmit_peers_deterministic( slot_leader: &Pubkey, ) { let keypair = Keypair::new(); - let merkle_root = Some(Hash::default()); + let merkle_root = Some(SliceRoot(Hash::default())); let reed_solomon_cache = ReedSolomonCache::default(); let mut stats = ProcessShredsStats::default(); let parent_slot = if slot > 0 { slot - 1 } else { 0 }; diff --git a/turbine/src/broadcast_stage.rs b/turbine/src/broadcast_stage.rs index cd30213540..680cca9f0b 100644 --- a/turbine/src/broadcast_stage.rs +++ b/turbine/src/broadcast_stage.rs @@ -606,6 +606,7 @@ pub mod test { }, solana_runtime::bank::Bank, solana_signer::Signer, + solana_votor_messages::SliceRoot, std::{ path::Path, sync::{atomic::AtomicBool, Arc}, @@ -635,7 +636,7 @@ pub mod test { &entries, true, // is_last_in_slot // chained_merkle_root - Some(Hash::new_from_array(rand::thread_rng().gen())), + Some(SliceRoot(Hash::new_from_array(rand::thread_rng().gen()))), 0, // next_shred_index, 0, // next_code_index &ReedSolomonCache::default(), diff --git a/turbine/src/broadcast_stage/broadcast_duplicates_run.rs b/turbine/src/broadcast_stage/broadcast_duplicates_run.rs index 1d08dbc057..c5b4c4a840 100644 --- a/turbine/src/broadcast_stage/broadcast_duplicates_run.rs +++ b/turbine/src/broadcast_stage/broadcast_duplicates_run.rs @@ -11,6 +11,7 @@ use { solana_signer::Signer, solana_system_transaction as system_transaction, solana_votor::event::VotorEventSender, + solana_votor_messages::SliceRoot, std::collections::HashSet, }; @@ -38,7 +39,7 @@ pub struct BroadcastDuplicatesConfig { pub(super) struct BroadcastDuplicatesRun { config: BroadcastDuplicatesConfig, current_slot: Slot, - chained_merkle_root: Hash, + chained_merkle_root: SliceRoot, carryover_entry: Option, next_shred_index: u32, next_code_index: u32, @@ -60,7 +61,7 @@ impl 
BroadcastDuplicatesRun { )); Self { config, - chained_merkle_root: Hash::default(), + chained_merkle_root: SliceRoot(Hash::default()), carryover_entry: None, next_shred_index: u32::MAX, next_code_index: 0, diff --git a/turbine/src/broadcast_stage/broadcast_utils.rs b/turbine/src/broadcast_stage/broadcast_utils.rs index aaa282f4e4..77bb022793 100644 --- a/turbine/src/broadcast_stage/broadcast_utils.rs +++ b/turbine/src/broadcast_stage/broadcast_utils.rs @@ -12,6 +12,7 @@ use { solana_poh::poh_recorder::WorkingBankEntry, solana_runtime::bank::Bank, solana_votor::event::{CompletedBlock, VotorEvent, VotorEventSender}, + solana_votor_messages::{AlpenglowBlockId, SliceRoot}, std::{ sync::Arc, time::{Duration, Instant}, @@ -170,10 +171,10 @@ pub(super) fn get_chained_merkle_root_from_parent( slot: Slot, parent: Slot, blockstore: &Blockstore, -) -> Result { +) -> Result { if slot == parent { debug_assert_eq!(slot, 0u64); - return Ok(Hash::default()); + return Ok(SliceRoot(Hash::default())); } debug_assert!(parent < slot, "parent: {parent} >= slot: {slot}"); let index = blockstore @@ -197,9 +198,11 @@ pub(super) fn get_chained_merkle_root_from_parent( pub(super) fn set_block_id_and_send( votor_event_sender: &VotorEventSender, bank: Arc, - block_id: Hash, + chained_merkle_id: SliceRoot, + alpenglow_block_id: AlpenglowBlockId, ) -> Result<()> { - bank.set_block_id(Some(block_id)); + bank.set_chained_merkle_id(Some(chained_merkle_id)); + bank.set_alpenglow_block_id(Some(alpenglow_block_id)); if bank.is_frozen() { votor_event_sender.send(VotorEvent::Block(CompletedBlock { slot: bank.slot(), diff --git a/turbine/src/broadcast_stage/fail_entry_verification_broadcast_run.rs b/turbine/src/broadcast_stage/fail_entry_verification_broadcast_run.rs index d58aefc181..03d940e891 100644 --- a/turbine/src/broadcast_stage/fail_entry_verification_broadcast_run.rs +++ b/turbine/src/broadcast_stage/fail_entry_verification_broadcast_run.rs @@ -5,6 +5,7 @@ use { solana_keypair::Keypair, solana_ledger::shred::{ProcessShredsStats, ReedSolomonCache, Shredder}, solana_votor::event::VotorEventSender, + solana_votor_messages::SliceRoot, std::{thread::sleep, time::Duration}, tokio::sync::mpsc::Sender as AsyncSender, }; @@ -17,7 +18,7 @@ pub(super) struct FailEntryVerificationBroadcastRun { shred_version: u16, good_shreds: Vec, current_slot: Slot, - chained_merkle_root: Hash, + chained_merkle_root: SliceRoot, carryover_entry: Option, next_shred_index: u32, next_code_index: u32, @@ -35,7 +36,7 @@ impl FailEntryVerificationBroadcastRun { shred_version, good_shreds: vec![], current_slot: 0, - chained_merkle_root: Hash::default(), + chained_merkle_root: SliceRoot(Hash::default()), carryover_entry: None, next_shred_index: 0, next_code_index: 0, diff --git a/turbine/src/broadcast_stage/standard_broadcast_run.rs b/turbine/src/broadcast_stage/standard_broadcast_run.rs index 1155aecf14..f4410f7ef5 100644 --- a/turbine/src/broadcast_stage/standard_broadcast_run.rs +++ b/turbine/src/broadcast_stage/standard_broadcast_run.rs @@ -16,6 +16,7 @@ use { }, solana_time_utils::AtomicInterval, solana_votor::event::VotorEventSender, + solana_votor_messages::SliceRoot, std::{borrow::Cow, sync::RwLock}, tokio::sync::mpsc::Sender as AsyncSender, }; @@ -24,7 +25,7 @@ use { pub struct StandardBroadcastRun { slot: Slot, parent: Slot, - chained_merkle_root: Hash, + chained_merkle_root: SliceRoot, carryover_entry: Option, next_shred_index: u32, next_code_index: u32, @@ -56,7 +57,7 @@ impl StandardBroadcastRun { Self { slot: Slot::MAX, parent: Slot::MAX, - 
chained_merkle_root: Hash::default(), + chained_merkle_root: SliceRoot(Hash::default()), carryover_entry: None, next_shred_index: 0, next_code_index: 0, @@ -250,7 +251,7 @@ impl StandardBroadcastRun { .unwrap_or_else(|err: Error| { error!("Unknown chained Merkle root: {err:?}"); process_stats.err_unknown_chained_merkle_root += 1; - Hash::default() + SliceRoot(Hash::default()) }) }; self.slot = bank.slot(); @@ -337,11 +338,11 @@ impl StandardBroadcastRun { self.completed = true; // Populate the block id and send for voting - // The block id is the merkle root of the last FEC set which is now the chained merkle root broadcast_utils::set_block_id_and_send( votor_event_sender, bank.clone(), self.chained_merkle_root, + ag_id, // TO BE CONTINUED )?; } @@ -581,7 +582,7 @@ mod test { let next_shred_index = 10; let slot = 1; let parent = 0; - run.chained_merkle_root = Hash::new_from_array(rand::thread_rng().gen()); + run.chained_merkle_root = SliceRoot(Hash::new_from_array(rand::thread_rng().gen())); run.next_shred_index = next_shred_index; run.next_code_index = 17; run.slot = slot; diff --git a/turbine/src/retransmit_stage.rs b/turbine/src/retransmit_stage.rs index b2886b39d9..f5e37dda0d 100644 --- a/turbine/src/retransmit_stage.rs +++ b/turbine/src/retransmit_stage.rs @@ -921,6 +921,7 @@ mod tests { solana_hash::Hash, solana_keypair::Keypair, solana_ledger::shred::{ProcessShredsStats, ReedSolomonCache, Shredder}, + solana_votor_messages::SliceRoot, }; fn get_keypair() -> Keypair { @@ -946,7 +947,7 @@ mod tests { &entries, true, // chained_merkle_root - Some(Hash::new_from_array(rand::thread_rng().gen())), + Some(SliceRoot(Hash::new_from_array(rand::thread_rng().gen()))), 0, code_index, &rsc, diff --git a/turbine/src/sigverify_shreds.rs b/turbine/src/sigverify_shreds.rs index 99e0d2c7d7..3dcf1c8d9b 100644 --- a/turbine/src/sigverify_shreds.rs +++ b/turbine/src/sigverify_shreds.rs @@ -31,6 +31,7 @@ use { solana_runtime::{bank::Bank, bank_forks::BankForks}, solana_signer::Signer, solana_streamer::{evicting_sender::EvictingSender, streamer::ChannelSend}, + solana_votor_messages::SliceRoot, std::{ num::NonZeroUsize, sync::{ @@ -378,7 +379,7 @@ fn verify_retransmitter_signature( Err(shred::Error::InvalidShredVariant) => return true, Err(_) => return false, }; - let Some(merkle_root) = shred::layout::get_merkle_root(shred) else { + let Some(SliceRoot(merkle_root)) = shred::layout::get_merkle_root(shred) else { return false; }; let Some(shred) = shred::layout::get_shred_id(shred) else { @@ -636,7 +637,7 @@ mod tests { &leader_keypair, &entries, true, - Some(Hash::new_unique()), + Some(SliceRoot(Hash::new_unique())), 0, 0, &ReedSolomonCache::default(), @@ -646,7 +647,7 @@ mod tests { &wrong_keypair, &entries, true, - Some(Hash::new_unique()), + Some(SliceRoot(Hash::new_unique())), 0, 0, &ReedSolomonCache::default(), @@ -700,7 +701,7 @@ mod tests { (bank_forks.working_bank(), bank_forks.root_bank()) }; - let chained_merkle_root = Some(Hash::new_from_array(rng.gen())); + let chained_merkle_root = Some(SliceRoot(Hash::new_from_array(rng.gen()))); let shredder = Shredder::new(root_bank.slot(), root_bank.parent_slot(), 0, 0).unwrap(); let entries = vec![Entry::new(&Hash::default(), 0, vec![])]; diff --git a/votor-messages/src/consensus_message.rs b/votor-messages/src/consensus_message.rs index 3bd3a35003..d3513bae51 100644 --- a/votor-messages/src/consensus_message.rs +++ b/votor-messages/src/consensus_message.rs @@ -1,17 +1,16 @@ //! 
Put BLS message here so all clients can agree on the format use { - crate::vote::Vote, + crate::{vote::Vote, AlpenglowBlockId}, serde::{Deserialize, Serialize}, solana_bls_signatures::Signature as BLSSignature, solana_clock::Slot, - solana_hash::Hash, }; /// The seed used to derive the BLS keypair pub const BLS_KEYPAIR_DERIVE_SEED: &[u8; 9] = b"alpenglow"; /// Block, a (slot, hash) tuple -pub type Block = (Slot, Hash); +pub type Block = (Slot, AlpenglowBlockId); #[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)] /// BLS vote message, we need rank to look up pubkey @@ -30,11 +29,11 @@ pub enum Certificate { /// Finalize certificate Finalize(Slot), /// Fast finalize certificate - FinalizeFast(Slot, Hash), + FinalizeFast(Slot, AlpenglowBlockId), /// Notarize certificate - Notarize(Slot, Hash), + Notarize(Slot, AlpenglowBlockId), /// Notarize fallback certificate - NotarizeFallback(Slot, Hash), + NotarizeFallback(Slot, AlpenglowBlockId), /// Skip certificate Skip(Slot), } @@ -55,14 +54,18 @@ pub enum CertificateType { } impl Certificate { - /// Create a new certificate ID from a CertificateType, Option, and Option - pub fn new(certificate_type: CertificateType, slot: Slot, hash: Option) -> Self { - match (certificate_type, hash) { + /// Create a new certificate ID from a CertificateType, Option, and Option + pub fn new( + certificate_type: CertificateType, + slot: Slot, + ag_id: Option, + ) -> Self { + match (certificate_type, ag_id) { (CertificateType::Finalize, None) => Certificate::Finalize(slot), - (CertificateType::FinalizeFast, Some(hash)) => Certificate::FinalizeFast(slot, hash), - (CertificateType::Notarize, Some(hash)) => Certificate::Notarize(slot, hash), - (CertificateType::NotarizeFallback, Some(hash)) => { - Certificate::NotarizeFallback(slot, hash) + (CertificateType::FinalizeFast, Some(ag_id)) => Certificate::FinalizeFast(slot, ag_id), + (CertificateType::Notarize, Some(ag_id)) => Certificate::Notarize(slot, ag_id), + (CertificateType::NotarizeFallback, Some(ag_id)) => { + Certificate::NotarizeFallback(slot, ag_id) } (CertificateType::Skip, None) => Certificate::Skip(slot), _ => panic!("Invalid certificate type and hash combination"), @@ -163,9 +166,9 @@ impl Certificate { /// the verifier uses to check the single aggregate signature. pub fn to_source_votes(&self) -> Option<(Vote, Vote)> { match self { - Certificate::NotarizeFallback(slot, hash) => { - let vote1 = Vote::new_notarization_vote(*slot, *hash); - let vote2 = Vote::new_notarization_fallback_vote(*slot, *hash); + Certificate::NotarizeFallback(slot, ag_id) => { + let vote1 = Vote::new_notarization_vote(*slot, *ag_id); + let vote2 = Vote::new_notarization_fallback_vote(*slot, *ag_id); Some((vote1, vote2)) } Certificate::Skip(slot) => { diff --git a/votor-messages/src/lib.rs b/votor-messages/src/lib.rs index ba2ee9995d..cdf049c136 100644 --- a/votor-messages/src/lib.rs +++ b/votor-messages/src/lib.rs @@ -2,9 +2,28 @@ #![cfg_attr(feature = "frozen-abi", feature(min_specialization))] #![deny(missing_docs)] +use { + serde::{Deserialize, Serialize}, + solana_hash::Hash, +}; + pub mod consensus_message; pub mod vote; #[cfg_attr(feature = "frozen-abi", macro_use)] #[cfg(feature = "frozen-abi")] extern crate solana_frozen_abi_macro; + +/// For every FEC set (AKA slice) of shreds, we have a Merkle tree over the shreds +/// signed by the leader. This is the root. 
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(transparent)] +pub struct SliceRoot(pub Hash); + +/// We locally build a second Merkle tree over the SliceRoots of a given block. +/// The root identifies the block and facilitates repair in Alpenglow. +#[derive( + Debug, Copy, Clone, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Deserialize, Serialize, +)] +#[serde(transparent)] +pub struct AlpenglowBlockId(pub Hash); diff --git a/votor-messages/src/vote.rs b/votor-messages/src/vote.rs index f27e258727..ae3939ee7b 100644 --- a/votor-messages/src/vote.rs +++ b/votor-messages/src/vote.rs @@ -1,7 +1,7 @@ //! Vote data types for use by clients use { + crate::AlpenglowBlockId, serde::{Deserialize, Serialize}, - solana_hash::Hash, solana_program::clock::Slot, }; @@ -28,7 +28,7 @@ pub enum Vote { impl Vote { /// Create a new notarization vote - pub fn new_notarization_vote(slot: Slot, block_id: Hash) -> Self { + pub fn new_notarization_vote(slot: Slot, block_id: AlpenglowBlockId) -> Self { Self::from(NotarizationVote::new(slot, block_id)) } @@ -43,7 +43,7 @@ impl Vote { } /// Create a new notarization fallback vote - pub fn new_notarization_fallback_vote(slot: Slot, block_id: Hash) -> Self { + pub fn new_notarization_fallback_vote(slot: Slot, block_id: AlpenglowBlockId) -> Self { Self::from(NotarizationFallbackVote::new(slot, block_id)) } @@ -64,7 +64,7 @@ impl Vote { } /// The block id associated with the block which was voted for - pub fn block_id(&self) -> Option<&Hash> { + pub fn block_id(&self) -> Option<&AlpenglowBlockId> { match self { Self::Notarize(vote) => Some(vote.block_id()), Self::NotarizeFallback(vote) => Some(vote.block_id()), @@ -142,12 +142,12 @@ impl From for Vote { #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Default, Serialize, Deserialize)] pub struct NotarizationVote { slot: Slot, - block_id: Hash, + block_id: AlpenglowBlockId, } impl NotarizationVote { /// Construct a notarization vote for `slot` - pub fn new(slot: Slot, block_id: Hash) -> Self { + pub fn new(slot: Slot, block_id: AlpenglowBlockId) -> Self { Self { slot, block_id } } @@ -157,7 +157,7 @@ impl NotarizationVote { } /// The block_id of the notarization slot - pub fn block_id(&self) -> &Hash { + pub fn block_id(&self) -> &AlpenglowBlockId { &self.block_id } } @@ -219,12 +219,12 @@ impl SkipVote { #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Default, Serialize, Deserialize)] pub struct NotarizationFallbackVote { slot: Slot, - block_id: Hash, + block_id: AlpenglowBlockId, } impl NotarizationFallbackVote { /// Construct a notarization vote for `slot` - pub fn new(slot: Slot, block_id: Hash) -> Self { + pub fn new(slot: Slot, block_id: AlpenglowBlockId) -> Self { Self { slot, block_id } } @@ -234,7 +234,7 @@ impl NotarizationFallbackVote { } /// The block_id of the notarization slot - pub fn block_id(&self) -> &Hash { + pub fn block_id(&self) -> &AlpenglowBlockId { &self.block_id } } diff --git a/votor/src/consensus_pool.rs b/votor/src/consensus_pool.rs index e631eb53dd..dcaa9723ff 100644 --- a/votor/src/consensus_pool.rs +++ b/votor/src/consensus_pool.rs @@ -17,7 +17,6 @@ use { log::{error, trace}, solana_clock::{Epoch, Slot}, solana_epoch_schedule::EpochSchedule, - solana_hash::Hash, solana_pubkey::Pubkey, solana_runtime::{bank::Bank, epoch_stakes::VersionedEpochStakes}, solana_votor_messages::{ @@ -25,6 +24,7 @@ use { Block, CertificateMessage, CertificateType, ConsensusMessage, VoteMessage, }, vote::Vote, + AlpenglowBlockId, }, std::{ cmp::Ordering, @@ -136,7 +136,7 @@ 
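
The lib.rs doc comments above describe a two-level scheme: each FEC set (slice) carries a leader-signed Merkle root (SliceRoot), and a second, locally built Merkle tree over those roots identifies the block (AlpenglowBlockId). The sketch below only illustrates that relationship; the plain pairwise hashing with the last node duplicated on odd levels is an assumption of this example, not the ledger's actual merkle-shred construction.

use {
    solana_hash::Hash,
    solana_sha256_hasher::hashv,
    solana_votor_messages::{AlpenglowBlockId, SliceRoot},
};

// Fold the per-slice roots of one block into a single identifier (illustrative only).
fn illustrative_block_id(slice_roots: &[SliceRoot]) -> AlpenglowBlockId {
    let mut level: Vec<Hash> = slice_roots.iter().map(|root| root.0).collect();
    while level.len() > 1 {
        level = level
            .chunks(2)
            .map(|pair| {
                // Duplicate the last node when a level has an odd number of entries.
                let right = pair.get(1).unwrap_or(&pair[0]);
                hashv(&[pair[0].as_ref(), right.as_ref()])
            })
            .collect();
    }
    AlpenglowBlockId(level.first().copied().unwrap_or_default())
}
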
impl ConsensusPool { pub fn new_from_root_bank(my_pubkey: Pubkey, bank: &Bank) -> Self { // To account for genesis and snapshots we allow default block id until // block id can be serialized as part of the snapshot - let root_block = (bank.slot(), bank.block_id().unwrap_or_default()); + let root_block = (bank.slot(), bank.alpenglow_block_id().unwrap_or_default()); let parent_ready_tracker = ParentReadyTracker::new(my_pubkey, root_block); Self { @@ -167,7 +167,7 @@ impl ConsensusPool { &mut self, slot: Slot, vote_type: VoteType, - block_id: Option, + block_id: Option, transaction: &VoteMessage, validator_vote_key: &Pubkey, validator_stake: Stake, @@ -199,7 +199,7 @@ impl ConsensusPool { fn update_certificates( &mut self, vote: &Vote, - block_id: Option, + block_id: Option, events: &mut Vec, total_stake: Stake, ) -> Result>, AddVoteError> { @@ -260,7 +260,7 @@ impl ConsensusPool { slot: Slot, vote_type: VoteType, validator_vote_key: &Pubkey, - block_id: &Option, + block_id: &Option, ) -> Option { for conflicting_type in conflicting_types(vote_type) { if let Some(pool) = self.vote_pools.get(&(slot, *conflicting_type)) { @@ -689,8 +689,9 @@ mod tests { }, }, solana_signer::Signer, - solana_votor_messages::consensus_message::{ - CertificateType, VoteMessage, BLS_KEYPAIR_DERIVE_SEED, + solana_votor_messages::{ + consensus_message::{CertificateType, VoteMessage, BLS_KEYPAIR_DERIVE_SEED}, + AlpenglowBlockId, }, std::sync::{Arc, RwLock}, test_case::test_case, @@ -956,7 +957,7 @@ mod tests { &mut pool, &bank_forks.read().unwrap().root_bank(), &validator_keypairs, - Vote::new_notarization_vote(5, Hash::default()), + Vote::new_notarization_vote(5, AlpenglowBlockId(Hash::default())), ); assert_eq!(pool.highest_notarized_slot(), 5); @@ -981,7 +982,7 @@ mod tests { &mut pool, &bank_forks.read().unwrap().root_bank(), &validator_keypairs, - Vote::new_notarization_vote(5, Hash::default()), + Vote::new_notarization_vote(5, AlpenglowBlockId(Hash::default())), ); assert_eq!(pool.highest_notarized_slot(), 5); @@ -1001,7 +1002,7 @@ mod tests { &mut pool, &bank_forks.read().unwrap().root_bank(), &validator_keypairs, - Vote::new_notarization_vote(5, Hash::default()), + Vote::new_notarization_vote(5, AlpenglowBlockId(Hash::default())), ); assert_eq!(pool.highest_notarized_slot(), 5); @@ -1030,7 +1031,7 @@ mod tests { &mut pool, &bank_forks.read().unwrap().root_bank(), &validator_keypairs, - Vote::new_notarization_vote(5, Hash::default()), + Vote::new_notarization_vote(5, AlpenglowBlockId(Hash::default())), ); assert_eq!(pool.highest_notarized_slot(), 5); @@ -1053,8 +1054,8 @@ mod tests { } #[test_case(Vote::new_finalization_vote(5), vec![CertificateType::Finalize])] - #[test_case(Vote::new_notarization_vote(6, Hash::new_unique()), vec![CertificateType::Notarize, CertificateType::NotarizeFallback])] - #[test_case(Vote::new_notarization_fallback_vote(7, Hash::new_unique()), vec![CertificateType::NotarizeFallback])] + #[test_case(Vote::new_notarization_vote(6, AlpenglowBlockId(Hash::new_unique())), vec![CertificateType::Notarize, CertificateType::NotarizeFallback])] + #[test_case(Vote::new_notarization_fallback_vote(7, AlpenglowBlockId(Hash::new_unique())), vec![CertificateType::NotarizeFallback])] #[test_case(Vote::new_skip_vote(8), vec![CertificateType::Skip])] #[test_case(Vote::new_skip_fallback_vote(9), vec![CertificateType::Skip])] fn test_add_vote_and_create_new_certificate_with_types( @@ -1151,15 +1152,15 @@ mod tests { #[test_case(CertificateType::Finalize, Vote::new_finalization_vote(5))] #[test_case( 
CertificateType::FinalizeFast, - Vote::new_notarization_vote(6, Hash::new_unique()) + Vote::new_notarization_vote(6, AlpenglowBlockId(Hash::new_unique())) )] #[test_case( CertificateType::Notarize, - Vote::new_notarization_vote(6, Hash::new_unique()) + Vote::new_notarization_vote(6, AlpenglowBlockId(Hash::new_unique())) )] #[test_case( CertificateType::NotarizeFallback, - Vote::new_notarization_fallback_vote(7, Hash::new_unique()) + Vote::new_notarization_fallback_vote(7, AlpenglowBlockId(Hash::new_unique())) )] #[test_case(CertificateType::Skip, Vote::new_skip_vote(8))] fn test_add_certificate_with_types(certificate_type: CertificateType, vote: Vote) { @@ -1595,7 +1596,7 @@ mod tests { // Create bank 2 let slot = 2; - let block_id = Hash::new_unique(); + let block_id = AlpenglowBlockId(Hash::new_unique()); // Add a skip from myself. let vote = Vote::new_skip_vote(2); @@ -1637,7 +1638,7 @@ mod tests { // Create bank 3 let slot = 3; - let block_id = Hash::new_unique(); + let block_id = AlpenglowBlockId(Hash::new_unique()); // Add 20% notarize, but no vote from myself, should fail for rank in 1..3 { @@ -1656,7 +1657,7 @@ mod tests { assert!(new_events.is_empty()); // Add a notarize from myself for some other block, but still not enough notar or skip, should fail. - let vote = Vote::new_notarization_vote(3, Hash::new_unique()); + let vote = Vote::new_notarization_vote(3, AlpenglowBlockId(Hash::new_unique())); assert!(pool .add_message( bank.epoch_schedule(), @@ -1700,7 +1701,7 @@ mod tests { // Add 20% notarization for another block, we should notify on new block_id // but not on the same block_id because we already sent the event - let duplicate_block_id = Hash::new_unique(); + let duplicate_block_id = AlpenglowBlockId(Hash::new_unique()); for rank in 7..9 { let vote = Vote::new_notarization_vote(3, duplicate_block_id); assert!(pool @@ -1734,7 +1735,7 @@ mod tests { let mut new_events = vec![]; // Add a notarize from myself. 
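
For readers skimming these test updates, a small self-contained sketch of the reworked API, with arbitrary values: notarization-flavored votes and certificate ids now carry an AlpenglowBlockId rather than a bare Hash, and Certificate::new keeps the invariant (the `_ => panic!` arm in consensus_message.rs above) that only the notarize and fast-finalize variants carry a block id.

use {
    solana_hash::Hash,
    solana_votor_messages::{
        consensus_message::{Certificate, CertificateType},
        vote::Vote,
        AlpenglowBlockId,
    },
};

fn example_consensus_messages() {
    let block_id = AlpenglowBlockId(Hash::new_unique());
    // Notarize votes and certificates are keyed by (slot, block id).
    let vote = Vote::new_notarization_vote(7, block_id);
    assert_eq!(vote.block_id(), Some(&block_id));
    let _notarize = Certificate::new(CertificateType::Notarize, 7, Some(block_id));
    // Finalize and Skip certificates take no block id; mixing these up panics.
    let _finalize = Certificate::new(CertificateType::Finalize, 7, None);
}
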
- let block_id = Hash::new_unique(); + let block_id = AlpenglowBlockId(Hash::new_unique()); let vote = Vote::new_notarization_vote(2, block_id); assert!(pool .add_message( @@ -1786,9 +1787,11 @@ mod tests { fn create_new_vote(vote_type: VoteType, slot: Slot) -> Vote { match vote_type { - VoteType::Notarize => Vote::new_notarization_vote(slot, Hash::default()), + VoteType::Notarize => { + Vote::new_notarization_vote(slot, AlpenglowBlockId(Hash::default())) + } VoteType::NotarizeFallback => { - Vote::new_notarization_fallback_vote(slot, Hash::default()) + Vote::new_notarization_fallback_vote(slot, AlpenglowBlockId(Hash::default())) } VoteType::Skip => Vote::new_skip_vote(slot), VoteType::SkipFallback => Vote::new_skip_fallback_vote(slot), @@ -1886,7 +1889,7 @@ mod tests { certificate: Certificate::new( CertificateType::FinalizeFast, 2, - Some(Hash::new_unique()), + Some(AlpenglowBlockId(Hash::new_unique())), ), signature: BLSSignature::default(), bitmap: Vec::new(), @@ -1928,7 +1931,11 @@ mod tests { .is_err()); // Send a cert on slot 2, it should be rejected - let certificate = Certificate::new(CertificateType::Notarize, 2, Some(Hash::new_unique())); + let certificate = Certificate::new( + CertificateType::Notarize, + 2, + Some(AlpenglowBlockId(Hash::new_unique())), + ); let cert = ConsensusMessage::Certificate(CertificateMessage { certificate, @@ -1959,7 +1966,7 @@ mod tests { certificate: Certificate::new( CertificateType::NotarizeFallback, 3, - Some(Hash::new_unique()), + Some(AlpenglowBlockId(Hash::new_unique())), ), signature: BLSSignature::default(), bitmap: Vec::new(), @@ -2000,7 +2007,11 @@ mod tests { // Add Notarize cert on 5 let cert_5 = CertificateMessage { - certificate: Certificate::new(CertificateType::Notarize, 5, Some(Hash::new_unique())), + certificate: Certificate::new( + CertificateType::Notarize, + 5, + Some(AlpenglowBlockId(Hash::new_unique())), + ), signature: BLSSignature::default(), bitmap: Vec::new(), }; @@ -2037,7 +2048,7 @@ mod tests { certificate: Certificate::new( CertificateType::FinalizeFast, 5, - Some(Hash::new_unique()), + Some(AlpenglowBlockId(Hash::new_unique())), ), signature: BLSSignature::default(), bitmap: Vec::new(), @@ -2062,7 +2073,11 @@ mod tests { // Now add Notarize cert on 6 let cert_6 = CertificateMessage { - certificate: Certificate::new(CertificateType::Notarize, 6, Some(Hash::new_unique())), + certificate: Certificate::new( + CertificateType::Notarize, + 6, + Some(AlpenglowBlockId(Hash::new_unique())), + ), signature: BLSSignature::default(), bitmap: Vec::new(), }; @@ -2105,7 +2120,7 @@ mod tests { certificate: Certificate::new( CertificateType::NotarizeFallback, 6, - Some(Hash::new_unique()), + Some(AlpenglowBlockId(Hash::new_unique())), ), signature: BLSSignature::default(), bitmap: Vec::new(), @@ -2172,7 +2187,11 @@ mod tests { ) .is_ok()); let cert_8_notarize = CertificateMessage { - certificate: Certificate::new(CertificateType::Notarize, 8, Some(Hash::new_unique())), + certificate: Certificate::new( + CertificateType::Notarize, + 8, + Some(AlpenglowBlockId(Hash::new_unique())), + ), signature: BLSSignature::default(), bitmap: Vec::new(), }; @@ -2203,7 +2222,7 @@ mod tests { let mut events = vec![]; // Add a notarization cert on slot 1 to 3 - let hash = Hash::new_unique(); + let hash = AlpenglowBlockId(Hash::new_unique()); for slot in 1..=3 { let cert = CertificateMessage { certificate: Certificate::new(CertificateType::Notarize, slot, Some(hash)), @@ -2306,7 +2325,7 @@ mod tests { fn test_vote_message_signature_verification() { let 
(validator_keypairs, _, _) = create_initial_state(); let rank_to_test = 3; - let vote = Vote::new_notarization_vote(42, Hash::new_unique()); + let vote = Vote::new_notarization_vote(42, AlpenglowBlockId(Hash::new_unique())); let consensus_message = dummy_transaction(&validator_keypairs, &vote, rank_to_test); let ConsensusMessage::Vote(vote_message) = consensus_message else { diff --git a/votor/src/consensus_pool/parent_ready_tracker.rs b/votor/src/consensus_pool/parent_ready_tracker.rs index b83eceeeb7..e09bc5a891 100644 --- a/votor/src/consensus_pool/parent_ready_tracker.rs +++ b/votor/src/consensus_pool/parent_ready_tracker.rs @@ -248,7 +248,7 @@ impl ParentReadyTracker { mod tests { use { super::*, itertools::Itertools, solana_clock::NUM_CONSECUTIVE_LEADER_SLOTS, - solana_hash::Hash, solana_pubkey::Pubkey, + solana_hash::Hash, solana_pubkey::Pubkey, solana_votor_messages::AlpenglowBlockId, }; #[test] @@ -258,7 +258,7 @@ mod tests { let mut events = vec![]; for i in 1..2 * NUM_CONSECUTIVE_LEADER_SLOTS { - let block = (i, Hash::new_unique()); + let block = (i, AlpenglowBlockId(Hash::new_unique())); tracker.add_new_notar_fallback_or_stronger(block, &mut events); assert_eq!(tracker.highest_parent_ready(), i + 1); assert!(tracker.parent_ready(i + 1, block)); @@ -270,7 +270,7 @@ mod tests { let genesis = Block::default(); let mut tracker = ParentReadyTracker::new(Pubkey::default(), genesis); let mut events = vec![]; - let block = (1, Hash::new_unique()); + let block = (1, AlpenglowBlockId(Hash::new_unique())); tracker.add_new_notar_fallback_or_stronger(block, &mut events); tracker.add_new_skip(1, &mut events); @@ -287,7 +287,7 @@ mod tests { let genesis = Block::default(); let mut tracker = ParentReadyTracker::new(Pubkey::default(), genesis); let mut events = vec![]; - let block = (1, Hash::new_unique()); + let block = (1, AlpenglowBlockId(Hash::new_unique())); tracker.add_new_skip(3, &mut events); tracker.add_new_skip(2, &mut events); @@ -304,7 +304,7 @@ mod tests { #[test] fn snapshot_wfsm() { let root_slot = 2147; - let root_block = (root_slot, Hash::new_unique()); + let root_block = (root_slot, AlpenglowBlockId(Hash::new_unique())); let mut tracker = ParentReadyTracker::new(Pubkey::default(), root_block); let mut events = vec![]; @@ -322,7 +322,7 @@ mod tests { assert!(tracker.parent_ready(root_slot + 3, root_block)); assert_eq!(tracker.highest_parent_ready(), root_slot + 3); - let block = (root_slot + 4, Hash::new_unique()); + let block = (root_slot + 4, AlpenglowBlockId(Hash::new_unique())); tracker.add_new_notar_fallback_or_stronger(block, &mut events); assert!(tracker.parent_ready(root_slot + 3, root_block)); assert!(tracker.parent_ready(root_slot + 5, block)); @@ -362,7 +362,10 @@ mod tests { BlockProductionParent::ParentNotReady ); - tracker.add_new_notar_fallback_or_stronger((4, Hash::new_unique()), &mut events); + tracker.add_new_notar_fallback_or_stronger( + (4, AlpenglowBlockId(Hash::new_unique())), + &mut events, + ); assert_eq!(tracker.highest_parent_ready(), 5); assert_eq!( tracker.block_production_parent(4), @@ -373,7 +376,10 @@ mod tests { tracker.block_production_parent(8), BlockProductionParent::ParentNotReady ); - tracker.add_new_notar_fallback_or_stronger((64, Hash::new_unique()), &mut events); + tracker.add_new_notar_fallback_or_stronger( + (64, AlpenglowBlockId(Hash::new_unique())), + &mut events, + ); assert_eq!(tracker.highest_parent_ready(), 65); assert_eq!( tracker.block_production_parent(8), @@ -389,7 +395,10 @@ mod tests { for i in 1..=10 { 
tracker.add_new_skip(i, &mut vec![]); - tracker.add_new_notar_fallback_or_stronger((i, Hash::new_unique()), &mut vec![]); + tracker.add_new_notar_fallback_or_stronger( + (i, AlpenglowBlockId(Hash::new_unique())), + &mut vec![], + ); } tracker.add_new_skip(11, &mut events); diff --git a/votor/src/consensus_pool/slot_stake_counters.rs b/votor/src/consensus_pool/slot_stake_counters.rs index 9247576a51..864637f215 100644 --- a/votor/src/consensus_pool/slot_stake_counters.rs +++ b/votor/src/consensus_pool/slot_stake_counters.rs @@ -4,8 +4,7 @@ use { SAFE_TO_NOTAR_MIN_NOTARIZE_AND_SKIP, SAFE_TO_NOTAR_MIN_NOTARIZE_FOR_NOTARIZE_OR_SKIP, SAFE_TO_NOTAR_MIN_NOTARIZE_ONLY, SAFE_TO_SKIP_THRESHOLD, }, - solana_hash::Hash, - solana_votor_messages::vote::Vote, + solana_votor_messages::{vote::Vote, AlpenglowBlockId}, std::collections::BTreeMap, }; @@ -15,9 +14,9 @@ pub(crate) struct SlotStakeCounters { total_stake: Stake, skip_total: Stake, notarize_total: Stake, - notarize_entry_total: BTreeMap, + notarize_entry_total: BTreeMap, top_notarized_stake: Stake, - safe_to_notar_sent: Vec, + safe_to_notar_sent: Vec, safe_to_skip_sent: bool, } @@ -77,7 +76,7 @@ impl SlotStakeCounters { } } - fn is_safe_to_notar(&self, block_id: &Hash, stake: &Stake) -> bool { + fn is_safe_to_notar(&self, block_id: &AlpenglowBlockId, stake: &Stake) -> bool { // White paper v1.1 page 22: The event is only issued if the node voted in slot s already, // but not to notarize b. Moreover: // notar(b) >= 40% or (skip(s) + notar(b) >= 60% and notar(b) >= 20%) @@ -122,7 +121,7 @@ impl SlotStakeCounters { #[cfg(test)] mod tests { - use {super::*, solana_votor_messages::vote::Vote}; + use {super::*, solana_hash::Hash, solana_votor_messages::vote::Vote}; #[test] fn test_safe_to_notar() { @@ -144,7 +143,7 @@ mod tests { // 40% of stake holders voted notarize counters.add_vote( - &Vote::new_notarization_vote(slot, Hash::default()), + &Vote::new_notarization_vote(slot, AlpenglowBlockId(Hash::default())), 40, false, &mut events, @@ -152,14 +151,14 @@ mod tests { ); assert_eq!(events.len(), 1); assert!( - matches!(events[0], VotorEvent::SafeToNotar((s, block_id)) if s == slot && block_id == Hash::default()) + matches!(events[0], VotorEvent::SafeToNotar((s, block_id)) if s == slot && block_id == AlpenglowBlockId(Hash::default())) ); assert_eq!(stats.event_safe_to_notarize, 1); events.clear(); // Adding more notarizations does not trigger more events counters.add_vote( - &Vote::new_notarization_vote(slot, Hash::default()), + &Vote::new_notarization_vote(slot, AlpenglowBlockId(Hash::default())), 20, false, &mut events, @@ -174,7 +173,7 @@ mod tests { stats = ConsensusPoolStats::default(); // I voted for notarize b - let hash_1 = Hash::new_unique(); + let hash_1 = AlpenglowBlockId(Hash::new_unique()); counters.add_vote( &Vote::new_notarization_vote(slot, hash_1), 1, @@ -186,7 +185,7 @@ mod tests { assert_eq!(stats.event_safe_to_notarize, 0); // 25% of stake holders voted notarize b' - let hash_2 = Hash::new_unique(); + let hash_2 = AlpenglowBlockId(Hash::new_unique()); counters.add_vote( &Vote::new_notarization_vote(slot, hash_2), 25, @@ -221,7 +220,7 @@ mod tests { let slot = 2; // I voted for notarize b counters.add_vote( - &Vote::new_notarization_vote(slot, Hash::default()), + &Vote::new_notarization_vote(slot, AlpenglowBlockId(Hash::default())), 10, true, &mut events, @@ -260,7 +259,7 @@ mod tests { stats = ConsensusPoolStats::default(); // I voted for notarize b, 10% of stake holders voted with me - let hash_1 = Hash::new_unique(); + let hash_1 = 
AlpenglowBlockId(Hash::new_unique()); counters.add_vote( &Vote::new_notarization_vote(slot, hash_1), 10, @@ -269,7 +268,7 @@ mod tests { &mut stats, ); // 20% of stake holders voted a different notarization b' - let hash_2 = Hash::new_unique(); + let hash_2 = AlpenglowBlockId(Hash::new_unique()); counters.add_vote( &Vote::new_notarization_vote(slot, hash_2), 20, diff --git a/votor/src/consensus_pool/vote_certificate_builder.rs b/votor/src/consensus_pool/vote_certificate_builder.rs index e0a63cca81..167d0e7604 100644 --- a/votor/src/consensus_pool/vote_certificate_builder.rs +++ b/votor/src/consensus_pool/vote_certificate_builder.rs @@ -158,12 +158,13 @@ mod tests { solana_votor_messages::{ consensus_message::{Certificate, CertificateType, VoteMessage}, vote::Vote, + AlpenglowBlockId, }, }; #[test] fn test_normal_build() { - let hash = Hash::new_unique(); + let hash = AlpenglowBlockId(Hash::new_unique()); let certificate = Certificate::new(CertificateType::NotarizeFallback, 1, Some(hash)); let mut builder = VoteCertificateBuilder::new(certificate); // Test building the certificate from Notarize and NotarizeFallback votes @@ -269,7 +270,7 @@ mod tests { #[test] fn test_builder_with_errors() { - let hash = Hash::new_unique(); + let hash = AlpenglowBlockId(Hash::new_unique()); let certificate = Certificate::new(CertificateType::NotarizeFallback, 1, Some(hash)); let mut builder = VoteCertificateBuilder::new(certificate); @@ -346,7 +347,7 @@ mod tests { #[test] fn test_certificate_verification_base2_encoding() { let slot = 10; - let hash = Hash::new_unique(); + let hash = AlpenglowBlockId(Hash::new_unique()); let certificate_id = Certificate::new(CertificateType::Notarize, slot, Some(hash)); // 1. Setup: Create keypairs and a single vote object. @@ -393,7 +394,7 @@ mod tests { #[test] fn test_certificate_verification_base3_encoding() { let slot = 20; - let hash = Hash::new_unique(); + let hash = AlpenglowBlockId(Hash::new_unique()); // A NotarizeFallback certificate can be composed of both Notarize and NotarizeFallback // votes. 
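
The whitepaper rule quoted in slot_stake_counters.rs above, "notar(b) >= 40% or (skip(s) + notar(b) >= 60% and notar(b) >= 20%)", condenses to a small predicate. The standalone version below uses plain integer percentages purely for illustration; the crate itself expresses the same thresholds through its SAFE_TO_NOTAR_* constants.

// Stake weights are u64 here, mirroring the crate's Stake alias.
fn safe_to_notar(notar_b: u64, skip_s: u64, total_stake: u64) -> bool {
    if total_stake == 0 {
        return false;
    }
    let pct = |stake: u64| stake.saturating_mul(100) / total_stake;
    // notar(b) >= 40%  OR  (skip(s) + notar(b) >= 60%  AND  notar(b) >= 20%)
    pct(notar_b) >= 40 || (pct(skip_s.saturating_add(notar_b)) >= 60 && pct(notar_b) >= 20)
}
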
let certificate_id = Certificate::new(CertificateType::NotarizeFallback, slot, Some(hash)); diff --git a/votor/src/consensus_pool/vote_pool.rs b/votor/src/consensus_pool/vote_pool.rs index 9ff98685b0..5b920cb5bc 100644 --- a/votor/src/consensus_pool/vote_pool.rs +++ b/votor/src/consensus_pool/vote_pool.rs @@ -1,8 +1,7 @@ use { crate::{consensus_pool::vote_certificate_builder::VoteCertificateBuilder, Stake}, - solana_hash::Hash, solana_pubkey::Pubkey, - solana_votor_messages::consensus_message::VoteMessage, + solana_votor_messages::{consensus_message::VoteMessage, AlpenglowBlockId}, std::collections::{HashMap, HashSet}, }; @@ -86,9 +85,9 @@ impl VotePool for SimpleVotePool { pub(crate) struct DuplicateBlockVotePool { max_entries_per_pubkey: usize, - pub(crate) votes: HashMap, + pub(crate) votes: HashMap, total_stake: Stake, - prev_voted_block_ids: HashMap>, + prev_voted_block_ids: HashMap>, } impl DuplicateBlockVotePool { @@ -104,7 +103,7 @@ impl DuplicateBlockVotePool { pub fn add_vote( &mut self, validator_vote_key: &Pubkey, - voted_block_id: Hash, + voted_block_id: AlpenglowBlockId, transaction: &VoteMessage, validator_stake: Stake, ) -> Option { @@ -138,13 +137,17 @@ impl DuplicateBlockVotePool { Some(vote_entry.total_stake_by_key) } - pub fn total_stake_by_block_id(&self, block_id: &Hash) -> Stake { + pub fn total_stake_by_block_id(&self, block_id: &AlpenglowBlockId) -> Stake { self.votes .get(block_id) .map_or(0, |vote_entries| vote_entries.total_stake_by_key) } - pub fn add_to_certificate(&self, block_id: &Hash, output: &mut VoteCertificateBuilder) { + pub fn add_to_certificate( + &self, + block_id: &AlpenglowBlockId, + output: &mut VoteCertificateBuilder, + ) { if let Some(vote_entries) = self.votes.get(block_id) { output .aggregate(&vote_entries.transactions) @@ -155,7 +158,7 @@ impl DuplicateBlockVotePool { pub fn has_prev_validator_vote_for_block( &self, validator_vote_key: &Pubkey, - block_id: &Hash, + block_id: &AlpenglowBlockId, ) -> bool { self.prev_voted_block_ids .get(validator_vote_key) @@ -177,7 +180,8 @@ mod test { use { super::*, solana_bls_signatures::Signature as BLSSignature, - solana_votor_messages::{consensus_message::VoteMessage, vote::Vote}, + solana_hash::Hash, + solana_votor_messages::{consensus_message::VoteMessage, vote::Vote, AlpenglowBlockId}, }; #[test] @@ -208,7 +212,7 @@ mod test { fn test_notarization_pool() { let mut vote_pool = DuplicateBlockVotePool::new(1); let my_pubkey = Pubkey::new_unique(); - let block_id = Hash::new_unique(); + let block_id = AlpenglowBlockId(Hash::new_unique()); let vote = Vote::new_notarization_vote(3, block_id); let transaction = VoteMessage { vote, @@ -250,7 +254,7 @@ mod test { fn test_notarization_fallback_pool() { solana_logger::setup(); let mut vote_pool = DuplicateBlockVotePool::new(3); - let vote = Vote::new_notarization_fallback_vote(7, Hash::new_unique()); + let vote = Vote::new_notarization_fallback_vote(7, AlpenglowBlockId(Hash::new_unique())); let transaction = VoteMessage { vote, signature: BLSSignature::default(), @@ -258,7 +262,9 @@ mod test { }; let my_pubkey = Pubkey::new_unique(); - let block_ids: Vec = (0..4).map(|_| Hash::new_unique()).collect(); + let block_ids: Vec = (0..4) + .map(|_| AlpenglowBlockId(Hash::new_unique())) + .collect(); // Adding the first 3 votes should succeed, but total_stake should remain at 10 for block_id in &block_ids[0..3] { diff --git a/votor/src/consensus_pool_service.rs b/votor/src/consensus_pool_service.rs index 193d7e7c41..c2aa21504d 100644 --- 
a/votor/src/consensus_pool_service.rs +++ b/votor/src/consensus_pool_service.rs @@ -198,7 +198,10 @@ impl ConsensusPoolService { let mut standstill_timer = Instant::now(); // Kick off parent ready - let root_block = (root_bank.slot(), root_bank.block_id().unwrap_or_default()); + let root_block = ( + root_bank.slot(), + root_bank.alpenglow_block_id().unwrap_or_default(), + ); let mut highest_parent_ready = root_bank.slot(); events.push(VotorEvent::ParentReady { slot: root_bank.slot().checked_add(1).unwrap(), @@ -436,6 +439,7 @@ mod tests { Certificate, CertificateType, VoteMessage, BLS_KEYPAIR_DERIVE_SEED, }, vote::Vote, + AlpenglowBlockId, }, std::sync::{Arc, Mutex}, test_case::test_case, @@ -546,7 +550,7 @@ mod tests { let setup_result = setup(); // validator 0 to 7 send Notarize on slot 2 - let block_id = Hash::new_unique(); + let block_id = AlpenglowBlockId(Hash::new_unique()); let target_slot = 2; let notarize_vote = Vote::new_notarization_vote(target_slot, block_id); let messages_to_send = (0..8) @@ -696,7 +700,7 @@ mod tests { // A lot of the receiver needs a finalize certificate to trigger an exit if channel_name != "consensus_message_receiver" { let finalize_certificate = CertificateMessage { - certificate: Certificate::FinalizeFast(2, Hash::new_unique()), + certificate: Certificate::FinalizeFast(2, AlpenglowBlockId(Hash::new_unique())), signature: BLSSignature::default(), bitmap: vec![], }; diff --git a/votor/src/event_handler.rs b/votor/src/event_handler.rs index 3e19ff7f2c..be19e2fab7 100644 --- a/votor/src/event_handler.rs +++ b/votor/src/event_handler.rs @@ -24,7 +24,7 @@ use { solana_pubkey::Pubkey, solana_runtime::{bank::Bank, bank_forks::SetRootError}, solana_signer::Signer, - solana_votor_messages::{consensus_message::Block, vote::Vote}, + solana_votor_messages::{consensus_message::Block, vote::Vote, AlpenglowBlockId}, std::{ collections::{BTreeMap, BTreeSet}, sync::{ @@ -350,7 +350,8 @@ impl EventHandler { { return Ok(votes); } - info!("{my_pubkey}: Voting notarize-fallback for {slot} {block_id}"); + let block_id_hash = block_id.0; + info!("{my_pubkey}: Voting notarize-fallback for {slot} {block_id_hash}"); if let Some(bls_op) = generate_vote_message( Vote::new_notarization_fallback_vote(slot, block_id), false, @@ -475,7 +476,8 @@ impl EventHandler { vctx: &mut VotingContext, local_context: &mut LocalContext, ) -> Option<(Slot, Block)> { - let (slot, block_id) = finalized_block; + let (slot, hash) = finalized_block; + let block_id = hash; let first_slot_of_window = first_of_consecutive_leader_slots(slot); if first_slot_of_window == slot || first_slot_of_window == 0 { // No need to trigger parent ready for the first slot of the window @@ -492,13 +494,13 @@ impl EventHandler { // We haven't finished replay for the block, so we can't trigger parent ready return None; } - if bank.block_id() != Some(block_id) { + if bank.alpenglow_block_id() != Some(block_id) { // We have a different block id for the slot, repair should kick in later return None; } let parent_bank = bank.parent()?; let parent_slot = parent_bank.slot(); - let Some(parent_block_id) = parent_bank.block_id() else { + let Some(parent_ag_id) = parent_bank.alpenglow_block_id() else { // Maybe this bank is set to root after we drop bank_forks. 
error!( "{}: Unable to find block id for parent bank {parent_slot} to trigger parent ready", @@ -506,12 +508,13 @@ impl EventHandler { ); return None; }; + let parent_ag_id_hash = parent_ag_id.0; info!( "{}: Triggering parent ready for slot {slot} with parent {parent_slot} \ - {parent_block_id}", + {parent_ag_id_hash}", local_context.my_pubkey ); - Some((slot, (parent_slot, parent_block_id))) + Some((slot, (parent_slot, parent_ag_id))) } fn handle_set_identity( @@ -539,18 +542,19 @@ impl EventHandler { let slot = bank.slot(); let block = ( slot, - bank.block_id().expect("Block id must be set upstream"), + bank.alpenglow_block_id() + .expect("Block id must be set upstream"), ); let parent_slot = bank.parent_slot(); - let parent_block_id = bank.parent_block_id().unwrap_or_else(|| { + let parent_ag_id = bank.parent_alpenglow_block_id().unwrap_or_else(|| { // To account for child of genesis and snapshots we insert a // default block id here. Charlie is working on a SIMD to add block // id to snapshots, which can allow us to remove this and update // the default case in parent ready tracker. trace!("Using default block id for {slot} parent {parent_slot}"); - Hash::default() + AlpenglowBlockId(Hash::default()) }); - let parent_block = (parent_slot, parent_block_id); + let parent_block = (parent_slot, parent_ag_id); (block, parent_block) } @@ -591,8 +595,8 @@ impl EventHandler { return Ok(false); } } - - info!("{my_pubkey}: Voting notarize for {slot} {block_id}"); + let block_id_hash = block_id.0; + info!("{my_pubkey}: Voting notarize for {slot} {block_id_hash}"); if let Some(bls_op) = generate_vote_message( Vote::new_notarization_vote(slot, block_id), false, @@ -752,7 +756,7 @@ impl EventHandler { (slot > old_root && vctx.vote_history.voted(slot) && bank.is_frozen() - && bank.block_id().is_some_and(|bid| bid == block_id)) + && bank.alpenglow_block_id().is_some_and(|bid| bid == block_id)) .then_some(slot) }) .max() @@ -1052,7 +1056,7 @@ mod tests { parent_bank: Arc, ) -> Arc { let bank = Bank::new_from_parent(parent_bank, &Pubkey::new_unique(), slot); - bank.set_block_id(Some(Hash::new_unique())); + bank.set_alpenglow_block_id(Some(AlpenglowBlockId(Hash::new_unique()))); bank.freeze(); let mut bank_forks_w = test_context.bank_forks.write().unwrap(); bank_forks_w.insert(bank); @@ -1210,9 +1214,16 @@ mod tests { // If there is a parent ready for block 1 Notarization is sent out. 
let slot = 1; let parent_slot = 0; - send_parent_ready_event(&test_context, slot, (parent_slot, Hash::default())); + send_parent_ready_event( + &test_context, + slot, + (parent_slot, AlpenglowBlockId(Hash::default())), + ); sleep(TEST_SHORT_TIMEOUT); - check_parent_ready_slot(&test_context, (slot, (parent_slot, Hash::default()))); + check_parent_ready_slot( + &test_context, + (slot, (parent_slot, AlpenglowBlockId(Hash::default()))), + ); let root_bank = test_context .bank_forks .read() @@ -1220,7 +1231,7 @@ mod tests { .sharable_banks() .root(); let bank1 = create_block_and_send_block_event(&test_context, slot, root_bank); - let block_id_1 = bank1.block_id().unwrap(); + let block_id_1 = bank1.alpenglow_block_id().unwrap(); // We should receive Notarize Vote for block 1 check_for_vote( @@ -1235,7 +1246,7 @@ mod tests { let slot = 2; let bank2 = create_block_and_send_block_event(&test_context, slot, bank1.clone()); - let block_id_2 = bank2.block_id().unwrap(); + let block_id_2 = bank2.alpenglow_block_id().unwrap(); // Because 2 is middle of window, we should see Notarize vote for block 2 even without parentready check_for_vote( @@ -1251,7 +1262,7 @@ mod tests { // Slot 4 completed replay without parent ready or parent notarized should not trigger Notarize vote let slot = 4; let bank4 = create_block_and_send_block_event(&test_context, slot, bank2.clone()); - let block_id_4 = bank4.block_id().unwrap(); + let block_id_4 = bank4.alpenglow_block_id().unwrap(); check_no_vote_or_commitment(&test_context); // Send parent ready for slot 4 should trigger Notarize vote for slot 4 @@ -1281,12 +1292,12 @@ mod tests { .sharable_banks() .root(); let bank1 = create_block_and_send_block_event(&test_context, 1, root_bank); - let block_id_1 = bank1.block_id().unwrap(); + let block_id_1 = bank1.alpenglow_block_id().unwrap(); // Add parent ready for 0 to trigger notar vote for 1 - send_parent_ready_event(&test_context, 1, (0, Hash::default())); + send_parent_ready_event(&test_context, 1, (0, AlpenglowBlockId(Hash::default()))); sleep(TEST_SHORT_TIMEOUT); - check_parent_ready_slot(&test_context, (1, (0, Hash::default()))); + check_parent_ready_slot(&test_context, (1, (0, AlpenglowBlockId(Hash::default())))); check_for_vote(&test_context, &Vote::new_notarization_vote(1, block_id_1)); check_for_commitment(&test_context, AlpenglowCommitmentType::Notarize, 1); @@ -1295,7 +1306,7 @@ mod tests { check_for_vote(&test_context, &Vote::new_finalization_vote(1)); let bank2 = create_block_and_send_block_event(&test_context, 2, bank1.clone()); - let block_id_2 = bank2.block_id().unwrap(); + let block_id_2 = bank2.alpenglow_block_id().unwrap(); // Both Notarize and Finalize votes should trigger for 2 check_for_vote(&test_context, &Vote::new_notarization_vote(2, block_id_2)); check_for_commitment(&test_context, AlpenglowCommitmentType::Notarize, 2); @@ -1305,7 +1316,7 @@ mod tests { // Create bank3 but do not Notarize, so Finalize vote should not trigger let slot = 3; let bank3 = create_block_only(&test_context, slot, bank2.clone()); - let block_id_3 = bank3.block_id().unwrap(); + let block_id_3 = bank3.alpenglow_block_id().unwrap(); // Check no notarization vote for 3 check_no_vote_or_commitment(&test_context); @@ -1385,10 +1396,10 @@ mod tests { .sharable_banks() .root(); let bank_1 = create_block_and_send_block_event(&test_context, 1, root_bank); - let block_id_1_old = bank_1.block_id().unwrap(); - send_parent_ready_event(&test_context, 1, (0, Hash::default())); + let block_id_1_old = bank_1.alpenglow_block_id().unwrap(); + 
send_parent_ready_event(&test_context, 1, (0, AlpenglowBlockId(Hash::default()))); sleep(TEST_SHORT_TIMEOUT); - check_parent_ready_slot(&test_context, (1, (0, Hash::default()))); + check_parent_ready_slot(&test_context, (1, (0, AlpenglowBlockId(Hash::default())))); check_for_vote( &test_context, &Vote::new_notarization_vote(1, block_id_1_old), @@ -1396,7 +1407,7 @@ mod tests { check_for_commitment(&test_context, AlpenglowCommitmentType::Notarize, 1); // Now we got safe_to_notar event for slot 1 and a different block id - let block_id_1_1 = Hash::new_unique(); + let block_id_1_1 = AlpenglowBlockId(Hash::new_unique()); send_safe_to_notar_event(&test_context, (1, block_id_1_1)); // We should see rest of the window skipped check_for_vote(&test_context, &Vote::new_skip_vote(2)); @@ -1411,7 +1422,7 @@ mod tests { // In this test you can trigger this any number of times, but the white paper // proved we can only get up to 3 different block ids on a slot, and our // certificate pool implementation checks that. - let block_id_1_2 = Hash::new_unique(); + let block_id_1_2 = AlpenglowBlockId(Hash::new_unique()); send_safe_to_notar_event(&test_context, (1, block_id_1_2)); // No skips this time because we already skipped the rest of the window // We should also see notarize fallback for the new block id @@ -1437,10 +1448,10 @@ mod tests { .sharable_banks() .root(); let bank_1 = create_block_and_send_block_event(&test_context, 1, root_bank); - let block_id_1 = bank_1.block_id().unwrap(); - send_parent_ready_event(&test_context, 1, (0, Hash::default())); + let block_id_1 = bank_1.alpenglow_block_id().unwrap(); + send_parent_ready_event(&test_context, 1, (0, AlpenglowBlockId(Hash::default()))); sleep(TEST_SHORT_TIMEOUT); - check_parent_ready_slot(&test_context, (1, (0, Hash::default()))); + check_parent_ready_slot(&test_context, (1, (0, AlpenglowBlockId(Hash::default())))); check_for_vote(&test_context, &Vote::new_notarization_vote(1, block_id_1)); check_for_commitment(&test_context, AlpenglowCommitmentType::Notarize, 1); @@ -1463,7 +1474,7 @@ mod tests { // Produce a full window of blocks // Assume the leader for 1-3 is us, send produce window event - send_produce_window_event(&test_context, 1, 3, (0, Hash::default())); + send_produce_window_event(&test_context, 1, 3, (0, AlpenglowBlockId(Hash::default()))); // Check that leader_window_notifier is updated let window_info = test_context @@ -1482,12 +1493,12 @@ mod tests { assert_eq!(received_leader_window_info.end_slot, 3); assert_eq!( received_leader_window_info.parent_block, - (0, Hash::default()) + (0, AlpenglowBlockId(Hash::default())) ); drop(guard); // Suddenly I found out I produced block 1 already, send new produce window event - let block_id_1 = Hash::new_unique(); + let block_id_1 = AlpenglowBlockId(Hash::new_unique()); send_produce_window_event(&test_context, 2, 3, (1, block_id_1)); let window_info = test_context .leader_window_notifier @@ -1522,11 +1533,11 @@ mod tests { .sharable_banks() .root(); let bank1 = create_block_and_send_block_event(&test_context, 1, root_bank); - let block_id_1 = bank1.block_id().unwrap(); + let block_id_1 = bank1.alpenglow_block_id().unwrap(); - send_parent_ready_event(&test_context, 1, (0, Hash::default())); + send_parent_ready_event(&test_context, 1, (0, AlpenglowBlockId(Hash::default()))); sleep(TEST_SHORT_TIMEOUT); - check_parent_ready_slot(&test_context, (1, (0, Hash::default()))); + check_parent_ready_slot(&test_context, (1, (0, AlpenglowBlockId(Hash::default())))); check_for_vote(&test_context, 
&Vote::new_notarization_vote(1, block_id_1)); check_for_commitment(&test_context, AlpenglowCommitmentType::Notarize, 1); @@ -1559,10 +1570,10 @@ mod tests { .sharable_banks() .root(); let bank4 = create_block_and_send_block_event(&test_context, 4, root_bank); - let block_id_4 = bank4.block_id().unwrap(); + let block_id_4 = bank4.alpenglow_block_id().unwrap(); let bank5 = create_block_and_send_block_event(&test_context, 5, bank4.clone()); - let block_id_5 = bank5.block_id().unwrap(); + let block_id_5 = bank5.alpenglow_block_id().unwrap(); send_finalized_event(&test_context, (5, block_id_5), true); sleep(TEST_SHORT_TIMEOUT); @@ -1572,7 +1583,7 @@ mod tests { // We are partitioned off from rest of the network, and suddenly received finalize for // slot 9 a little before we finished replay slot 9 let bank9 = create_block_only(&test_context, 9, bank5.clone()); - let block_id_9 = bank9.block_id().unwrap(); + let block_id_9 = bank9.alpenglow_block_id().unwrap(); send_finalized_event(&test_context, (9, block_id_9), true); sleep(TEST_SHORT_TIMEOUT); send_block_event(&test_context, 9, bank9.clone()); @@ -1598,8 +1609,8 @@ mod tests { .sharable_banks() .root(); let bank1 = create_block_and_send_block_event(&test_context, 1, root_bank); - let block_id_1 = bank1.block_id().unwrap(); - send_parent_ready_event(&test_context, 1, (0, Hash::default())); + let block_id_1 = bank1.alpenglow_block_id().unwrap(); + send_parent_ready_event(&test_context, 1, (0, AlpenglowBlockId(Hash::default()))); sleep(TEST_SHORT_TIMEOUT); check_for_vote(&test_context, &Vote::new_notarization_vote(1, block_id_1)); sleep(TEST_SHORT_TIMEOUT); @@ -1659,7 +1670,7 @@ mod tests { .sharable_banks() .root(); let _ = create_block_and_send_block_event(&test_context, 1, root_bank.clone()); - send_parent_ready_event(&test_context, 1, (0, Hash::default())); + send_parent_ready_event(&test_context, 1, (0, AlpenglowBlockId(Hash::default()))); sleep(TEST_SHORT_TIMEOUT); // There should be no votes but we should see commitments for hot spares assert_eq!( @@ -1680,8 +1691,8 @@ mod tests { // We should now be able to vote again let slot = 4; let bank4 = create_block_and_send_block_event(&test_context, slot, root_bank); - let block_id_4 = bank4.block_id().unwrap(); - send_parent_ready_event(&test_context, slot, (0, Hash::default())); + let block_id_4 = bank4.alpenglow_block_id().unwrap(); + send_parent_ready_event(&test_context, slot, (0, AlpenglowBlockId(Hash::default()))); check_for_vote( &test_context, &Vote::new_notarization_vote(slot, block_id_4), @@ -1723,7 +1734,7 @@ mod tests { // We normally need some event hitting all the senders to trigger exit let root_bank = setup_result.bank_forks.read().unwrap().root_bank(); let _ = create_block_and_send_block_event(&setup_result, 1, root_bank); - send_parent_ready_event(&setup_result, 1, (0, Hash::default())); + send_parent_ready_event(&setup_result, 1, (0, AlpenglowBlockId(Hash::default()))); sleep(TEST_SHORT_TIMEOUT); // Verify that the event_handler exits within 5 seconds let start = Instant::now(); diff --git a/votor/src/root_utils.rs b/votor/src/root_utils.rs index 769a260cba..0099f08fbb 100644 --- a/votor/src/root_utils.rs +++ b/votor/src/root_utils.rs @@ -15,7 +15,7 @@ use { snapshot_controller::SnapshotController, }, solana_time_utils::timestamp, - solana_votor_messages::consensus_message::Block, + solana_votor_messages::{consensus_message::Block, AlpenglowBlockId}, std::{ collections::BTreeSet, sync::{Arc, RwLock}, @@ -46,7 +46,7 @@ pub(crate) fn set_root( info!("{my_pubkey}: setting root 
{new_root}"); vctx.vote_history.set_root(new_root); *pending_blocks = pending_blocks.split_off(&new_root); - *finalized_blocks = finalized_blocks.split_off(&(new_root, Hash::default())); + *finalized_blocks = finalized_blocks.split_off(&(new_root, AlpenglowBlockId(Hash::default()))); *received_shred = received_shred.split_off(&new_root); check_and_handle_new_root( diff --git a/votor/src/vote_history.rs b/votor/src/vote_history.rs index 70f17c9ffd..6864b6eeb4 100644 --- a/votor/src/vote_history.rs +++ b/votor/src/vote_history.rs @@ -7,7 +7,7 @@ use { solana_hash::Hash, solana_keypair::Keypair, solana_pubkey::Pubkey, - solana_votor_messages::{consensus_message::Block, vote::Vote}, + solana_votor_messages::{consensus_message::Block, vote::Vote, AlpenglowBlockId}, std::collections::{hash_map::Entry, HashMap, HashSet}, thiserror::Error, }; @@ -59,11 +59,11 @@ pub struct VoteHistory { /// The blocks for which this node has cast a notarization vote /// In the format of slot, block_id, bank_hash - voted_notar: HashMap, + voted_notar: HashMap, /// The blocks for which this node has cast a notarization fallback /// vote in this slot - voted_notar_fallback: HashMap>, + voted_notar_fallback: HashMap>, /// The slots for which this node has cast a skip fallback vote voted_skip_fallback: HashSet, @@ -108,13 +108,13 @@ impl VoteHistory { } /// The block for which we voted notarize in slot `slot` - pub fn voted_notar(&self, slot: Slot) -> Option { + pub fn voted_notar(&self, slot: Slot) -> Option { assert!(slot >= self.root); self.voted_notar.get(&slot).copied() } /// Whether we voted notarize fallback in `slot` for block `(block_id, bank_hash)` - pub fn voted_notar_fallback(&self, slot: Slot, block_id: Hash) -> bool { + pub fn voted_notar_fallback(&self, slot: Slot, block_id: AlpenglowBlockId) -> bool { assert!(slot >= self.root); self.voted_notar_fallback .get(&slot) diff --git a/votor/src/voting_utils.rs b/votor/src/voting_utils.rs index a6ab1bf8d8..4cba474e50 100644 --- a/votor/src/voting_utils.rs +++ b/votor/src/voting_utils.rs @@ -341,6 +341,7 @@ mod tests { create_genesis_config_with_alpenglow_vote_accounts, ValidatorVoteKeypairs, }, }, + solana_votor_messages::AlpenglowBlockId, std::sync::{Arc, RwLock}, }; @@ -409,7 +410,7 @@ mod tests { .unwrap(); // Generate a normal notarization vote and check it's sent out correctly. 
- let block_id = Hash::new_unique(); + let block_id = AlpenglowBlockId(Hash::new_unique()); let vote_slot = 2; let vote = Vote::new_notarization_vote(vote_slot, block_id); let result = generate_vote_message(vote, false, &mut voting_context) @@ -510,7 +511,7 @@ mod tests { // Wrong identity keypair voting_context.identity_keypair = Arc::new(Keypair::new()); - let vote = Vote::new_notarization_vote(6, Hash::new_unique()); + let vote = Vote::new_notarization_vote(6, AlpenglowBlockId(Hash::new_unique())); assert!(generate_vote_message(vote, true, &mut voting_context) .unwrap() .is_none()); @@ -536,7 +537,7 @@ mod tests { // Wrong vote account pubkey voting_context.vote_account_pubkey = Pubkey::new_unique(); - let vote = Vote::new_notarization_vote(7, Hash::new_unique()); + let vote = Vote::new_notarization_vote(7, AlpenglowBlockId(Hash::new_unique())); assert!(generate_vote_message(vote, true, &mut voting_context) .unwrap() .is_none()); @@ -564,7 +565,7 @@ mod tests { validator_keypairs[my_index].vote_keypair.pubkey(), Arc::new(BLSKeypair::new()), ); - let vote = Vote::new_notarization_vote(8, Hash::new_unique()); + let vote = Vote::new_notarization_vote(8, AlpenglowBlockId(Hash::new_unique())); assert!(generate_vote_message(vote, true, &mut voting_context) .unwrap() .is_none()); @@ -584,7 +585,7 @@ mod tests { setup_voting_context_and_bank_forks(own_vote_sender, &validator_keypairs, my_index); // If we try to vote for a slot in the future, we should panic - let vote = Vote::new_notarization_vote(1_000_000_000, Hash::new_unique()); + let vote = Vote::new_notarization_vote(1_000_000_000, AlpenglowBlockId(Hash::new_unique())); let _ = generate_vote_message(vote, false, &mut voting_context); } @@ -635,7 +636,10 @@ mod tests { .root() .epoch_schedule() .get_first_slot_in_epoch(1); - let vote = Vote::new_notarization_vote(first_slot_in_epoch_1, Hash::new_unique()); + let vote = Vote::new_notarization_vote( + first_slot_in_epoch_1, + AlpenglowBlockId(Hash::new_unique()), + ); assert!(generate_vote_message(vote, false, &mut voting_context) .unwrap() .is_some()); @@ -646,7 +650,10 @@ mod tests { .root() .epoch_schedule() .get_first_slot_in_epoch(2); - let vote = Vote::new_notarization_vote(first_slot_in_epoch_2, Hash::new_unique()); + let vote = Vote::new_notarization_vote( + first_slot_in_epoch_2, + AlpenglowBlockId(Hash::new_unique()), + ); assert!(generate_vote_message(vote, false, &mut voting_context) .unwrap() .is_none()); diff --git a/votor/src/votor.rs b/votor/src/votor.rs index 216c7d96c1..3744e16170 100644 --- a/votor/src/votor.rs +++ b/votor/src/votor.rs @@ -58,7 +58,6 @@ use { parking_lot::RwLock as PlRwLock, solana_clock::Slot, solana_gossip::cluster_info::ClusterInfo, - solana_hash::Hash, solana_keypair::Keypair, solana_ledger::{blockstore::Blockstore, leader_schedule_cache::LeaderScheduleCache}, solana_pubkey::Pubkey, @@ -70,7 +69,7 @@ use { bank_forks::BankForks, installed_scheduler_pool::BankWithScheduler, snapshot_controller::SnapshotController, }, - solana_votor_messages::consensus_message::ConsensusMessage, + solana_votor_messages::{consensus_message::ConsensusMessage, AlpenglowBlockId}, std::{ collections::HashMap, sync::{ @@ -87,7 +86,7 @@ use { pub struct LeaderWindowNotifier { pub window_info: Mutex>, pub window_notification: Condvar, - pub highest_parent_ready: RwLock<(Slot, (Slot, Hash))>, + pub highest_parent_ready: RwLock<(Slot, (Slot, AlpenglowBlockId))>, } /// Inputs to Votor
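The change threaded through the hunks above is a single pattern: bare `Hash` block ids become the `AlpenglowBlockId` newtype from `solana-votor-messages`, constructed as `AlpenglowBlockId(Hash::...)` and unwrapped via `.0` where a plain hash is needed (logging, comparisons). The following is a minimal standalone sketch of that pattern, not the crate's actual declaration: the derives are inferred from how the type is used in this patch (`Copy` for `.copied()`, `Ord` for ordered collections of `(Slot, AlpenglowBlockId)`, `Default` for `unwrap_or_default()`), the `Block` alias mirrors the `(slot, block id)` pairs used here, and `Hash::new_unique()` is the same test-only helper the patch's tests call.

use solana_hash::Hash;
use std::collections::BTreeSet;

// Sketch of the newtype shape assumed by the calls in this patch; the real
// type lives in solana-votor-messages and may carry different derives.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct AlpenglowBlockId(pub Hash);

// A (slot, block id) pair, mirroring how block identities are passed around
// in the votor hunks above (hypothetical alias for illustration only).
pub type Block = (u64, AlpenglowBlockId);

fn main() {
    // Tests in this patch build ids directly from a hash.
    let block_id = AlpenglowBlockId(Hash::new_unique());

    // Logging reaches through to the inner hash, as `block_id.0` does above.
    println!("voting notarize for {}", block_id.0);

    // Ordered collections of (slot, block id) keep working, which the
    // finalized_blocks.split_off(&(new_root, AlpenglowBlockId(Hash::default())))
    // call in root_utils.rs relies on: split_off keeps everything at or after
    // the given key in the returned set.
    let mut finalized: BTreeSet<Block> = BTreeSet::new();
    finalized.insert((5, block_id));
    let at_or_after_root = finalized.split_off(&(5, AlpenglowBlockId(Hash::default())));
    assert!(at_or_after_root.contains(&(5, block_id)));
}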