From 2987e6378076417f50a07598939907965405040c Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 18 Feb 2025 14:46:18 -0700 Subject: [PATCH 01/20] Add new gossip message with deserialization stuff and new trait --- crates/services/p2p/src/codecs/postcard.rs | 6 +++ crates/services/p2p/src/gossipsub/messages.rs | 4 ++ crates/services/p2p/src/gossipsub/topics.rs | 8 +++ crates/services/p2p/src/p2p_service.rs | 7 ++- crates/services/p2p/src/service.rs | 51 ++++++++++++++++--- crates/types/src/services/p2p.rs | 10 ++++ 6 files changed, 78 insertions(+), 8 deletions(-) diff --git a/crates/services/p2p/src/codecs/postcard.rs b/crates/services/p2p/src/codecs/postcard.rs index 347f5a4cbfc..2ce67e3e2c6 100644 --- a/crates/services/p2p/src/codecs/postcard.rs +++ b/crates/services/p2p/src/codecs/postcard.rs @@ -158,6 +158,9 @@ impl GossipsubCodec for PostcardCodec { fn encode(&self, data: Self::RequestMessage) -> Result, io::Error> { let encoded_data = match data { GossipsubBroadcastRequest::NewTx(tx) => postcard::to_stdvec(&*tx), + GossipsubBroadcastRequest::Confirmations(confirmations) => { + postcard::to_stdvec(&*confirmations) + } }; encoded_data.map_err(|e| io::Error::new(io::ErrorKind::Other, e.to_string())) @@ -170,6 +173,9 @@ impl GossipsubCodec for PostcardCodec { ) -> Result { let decoded_response = match gossipsub_tag { GossipTopicTag::NewTx => GossipsubMessage::NewTx(deserialize(encoded_data)?), + GossipTopicTag::TxConfirmations => { + GossipsubMessage::Confirmations(deserialize(encoded_data)?) 
+ } }; Ok(decoded_response) diff --git a/crates/services/p2p/src/gossipsub/messages.rs b/crates/services/p2p/src/gossipsub/messages.rs index 07070685991..32175e93ce4 100644 --- a/crates/services/p2p/src/gossipsub/messages.rs +++ b/crates/services/p2p/src/gossipsub/messages.rs @@ -2,6 +2,7 @@ use std::sync::Arc; use fuel_core_types::fuel_tx::Transaction; +use fuel_core_types::services::p2p::TxConfirmations; use serde::{ Deserialize, Serialize, @@ -12,6 +13,7 @@ use serde::{ #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub enum GossipTopicTag { NewTx, + TxConfirmations, } /// Takes `Arc` and wraps it in a matching GossipsubBroadcastRequest @@ -20,9 +22,11 @@ pub enum GossipTopicTag { #[derive(Debug, Clone)] pub enum GossipsubBroadcastRequest { NewTx(Arc), + Confirmations(Arc), } #[derive(Serialize, Deserialize, Debug, Clone)] pub enum GossipsubMessage { NewTx(Transaction), + Confirmations(TxConfirmations), } diff --git a/crates/services/p2p/src/gossipsub/topics.rs b/crates/services/p2p/src/gossipsub/topics.rs index ac8e9efb105..9d97142c71b 100644 --- a/crates/services/p2p/src/gossipsub/topics.rs +++ b/crates/services/p2p/src/gossipsub/topics.rs @@ -10,6 +10,7 @@ use super::messages::{ }; pub const NEW_TX_GOSSIP_TOPIC: &str = "new_tx"; +pub const TX_CONFIRMATIONS_GOSSIP_TOPIC: &str = "tx_confirmations"; /// Holds used Gossipsub Topics /// Each field contains TopicHash of existing topics @@ -17,15 +18,19 @@ pub const NEW_TX_GOSSIP_TOPIC: &str = "new_tx"; #[derive(Debug)] pub struct GossipsubTopics { new_tx_topic: TopicHash, + tx_confirmations_topic: TopicHash, } impl GossipsubTopics { pub fn new(network_name: &str) -> Self { let new_tx_topic: Sha256Topic = Topic::new(format!("{NEW_TX_GOSSIP_TOPIC}/{network_name}")); + let tx_confirmations_topic: Sha256Topic = + Topic::new(format!("{TX_CONFIRMATIONS_GOSSIP_TOPIC}/{network_name}")); Self { new_tx_topic: new_tx_topic.hash(), + tx_confirmations_topic: tx_confirmations_topic.hash(), } } @@ -48,6 +53,9 @@ impl 
GossipsubTopics { ) -> TopicHash { match outgoing_request { GossipsubBroadcastRequest::NewTx(_) => self.new_tx_topic.clone(), + GossipsubBroadcastRequest::Confirmations(_) => { + self.tx_confirmations_topic.clone() + } } } } diff --git a/crates/services/p2p/src/p2p_service.rs b/crates/services/p2p/src/p2p_service.rs index db87eb95151..97ff8a848e6 100644 --- a/crates/services/p2p/src/p2p_service.rs +++ b/crates/services/p2p/src/p2p_service.rs @@ -1653,6 +1653,9 @@ mod tests { let selected_topic: Sha256Topic = { let topic = match broadcast_request { GossipsubBroadcastRequest::NewTx(_) => NEW_TX_GOSSIP_TOPIC, + GossipsubBroadcastRequest::Confirmations(_) => { + unimplemented!() + } }; Topic::new(format!("{}/{}", topic, p2p_config.network_name)) @@ -1723,7 +1726,9 @@ mod tests { panic!("Wrong GossipsubMessage") } } - } + GossipsubMessage::Confirmations(_) => { + unimplemented!() + }} // Node B received the correct message // If we try to publish it again we will get `PublishError::Duplicate` diff --git a/crates/services/p2p/src/service.rs b/crates/services/p2p/src/service.rs index bfd5f187e30..e373aa846b2 100644 --- a/crates/services/p2p/src/service.rs +++ b/crates/services/p2p/src/service.rs @@ -60,6 +60,7 @@ use fuel_core_types::{ PeerReport, }, BlockHeightHeartbeatData, + ConfirmationsGossipData, GossipData, GossipsubMessageAcceptance, GossipsubMessageInfo, @@ -76,6 +77,7 @@ use futures::{ use libp2p::{ gossipsub::{ MessageAcceptance, + MessageId, PublishError, }, request_response::InboundRequestId, @@ -368,6 +370,11 @@ pub trait Broadcast: Send { fn tx_broadcast(&self, transaction: TransactionGossipData) -> anyhow::Result<()>; + fn confirmations_broadcast( + &self, + confirmations: ConfirmationsGossipData, + ) -> anyhow::Result<()>; + fn new_tx_subscription_broadcast(&self, peer_id: FuelPeerId) -> anyhow::Result<()>; } @@ -394,6 +401,13 @@ impl Broadcast for SharedState { Ok(()) } + fn confirmations_broadcast( + &self, + _confirmations: ConfirmationsGossipData, + ) 
-> anyhow::Result<()> { + todo!(); + } + fn new_tx_subscription_broadcast(&self, peer_id: FuelPeerId) -> anyhow::Result<()> { self.new_tx_subscription_broadcast.send(peer_id)?; Ok(()) @@ -440,6 +454,28 @@ pub struct Task { cached_view: Arc, } +impl Task { + pub(crate) fn broadcast_gossip_message( + &self, + message: GossipsubMessage, + message_id: MessageId, + peer_id: PeerId, + ) { + let message_id = message_id.0; + + match message { + GossipsubMessage::NewTx(transaction) => { + let next_transaction = GossipData::new(transaction, peer_id, message_id); + let _ = self.broadcast.tx_broadcast(next_transaction); + } + GossipsubMessage::Confirmations(confirmations) => { + let data = GossipData::new(confirmations, peer_id, message_id); + let _ = self.broadcast.confirmations_broadcast(data); + } + } + } +} + #[derive(Default, Clone)] pub struct HeartbeatPeerReputationConfig { old_heartbeat_penalty: AppScore, @@ -986,14 +1022,8 @@ where let _ = self.broadcast.block_height_broadcast(block_height_data); } Some(FuelP2PEvent::GossipsubMessage { message, message_id, peer_id,.. 
}) => { - let message_id = message_id.0; - match message { - GossipsubMessage::NewTx(transaction) => { - let next_transaction = GossipData::new(transaction, peer_id, message_id); - let _ = self.broadcast.tx_broadcast(next_transaction); - }, - } + self.broadcast_gossip_message(message, message_id, peer_id); }, Some(FuelP2PEvent::InboundRequestMessage { request_message, request_id }) => { let res = self.process_request(request_message, request_id); @@ -1642,6 +1672,13 @@ pub mod tests { todo!() } + fn confirmations_broadcast( + &self, + _confirmations: ConfirmationsGossipData, + ) -> anyhow::Result<()> { + todo!() + } + fn new_tx_subscription_broadcast( &self, _peer_id: FuelPeerId, diff --git a/crates/types/src/services/p2p.rs b/crates/types/src/services/p2p.rs index e6659a7db54..67a9e564d79 100644 --- a/crates/types/src/services/p2p.rs +++ b/crates/types/src/services/p2p.rs @@ -72,6 +72,16 @@ pub struct GossipData { /// Transactions gossiped by peers for inclusion into a block pub type TransactionGossipData = GossipData; +/// Transactions that have been confirmed by block producer +pub type ConfirmationsGossipData = GossipData; + +/// List of transactions that have been confirmed with block producer's signature +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct TxConfirmations { + signature: String, + txs: Vec, +} + #[derive(Default, Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] /// The source of some network data. 
pub struct SourcePeer { From c58d79b9442dde688ab885dc9d76f0fa1e1dcc88 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 18 Feb 2025 20:23:00 -0700 Subject: [PATCH 02/20] Add new topic to behavior creation --- crates/services/p2p/src/gossipsub/config.rs | 16 ++++- crates/services/p2p/src/gossipsub/topics.rs | 3 + crates/services/p2p/src/p2p_service.rs | 76 +++++++++++++++++---- crates/types/src/services/p2p.rs | 11 +++ 4 files changed, 90 insertions(+), 16 deletions(-) diff --git a/crates/services/p2p/src/gossipsub/config.rs b/crates/services/p2p/src/gossipsub/config.rs index 2f6973e8ee9..fd7a7b7fc86 100644 --- a/crates/services/p2p/src/gossipsub/config.rs +++ b/crates/services/p2p/src/gossipsub/config.rs @@ -1,4 +1,7 @@ -use super::topics::NEW_TX_GOSSIP_TOPIC; +use super::topics::{ + NEW_TX_GOSSIP_TOPIC, + TX_CONFIRMATIONS_GOSSIP_TOPIC, +}; use crate::{ config::{ Config, @@ -49,6 +52,8 @@ const MESH_SIZE: usize = 8; // The weight applied to the score for delivering new transactions. const NEW_TX_GOSSIP_WEIGHT: f64 = 0.05; +const TX_CONFIRMATIONS_GOSSIP_WEIGHT: f64 = 0.05; + // The threshold for a peer's score to be considered for greylisting. // If a peer's score falls below this value, they will be greylisted. // Greylisting is a lighter form of banning, where the peer's messages might be ignored or given lower priority, @@ -222,7 +227,14 @@ fn initialize_gossipsub(gossipsub: &mut gossipsub::Behaviour, p2p_config: &Confi .with_peer_score(peer_score_params, peer_score_thresholds) .expect("gossipsub initialized with peer score"); - let topics = vec![(NEW_TX_GOSSIP_TOPIC, NEW_TX_GOSSIP_WEIGHT)]; + // TODO: Make topics configurable. 
+ let topics = vec![ + (NEW_TX_GOSSIP_TOPIC, NEW_TX_GOSSIP_WEIGHT), + ( + TX_CONFIRMATIONS_GOSSIP_TOPIC, + TX_CONFIRMATIONS_GOSSIP_WEIGHT, + ), + ]; // subscribe to gossipsub topics with the network name suffix for (topic, weight) in topics { diff --git a/crates/services/p2p/src/gossipsub/topics.rs b/crates/services/p2p/src/gossipsub/topics.rs index 9d97142c71b..fa726768bbe 100644 --- a/crates/services/p2p/src/gossipsub/topics.rs +++ b/crates/services/p2p/src/gossipsub/topics.rs @@ -41,6 +41,9 @@ impl GossipsubTopics { ) -> Option { match incoming_topic { hash if hash == &self.new_tx_topic => Some(GossipTopicTag::NewTx), + hash if hash == &self.tx_confirmations_topic => { + Some(GossipTopicTag::TxConfirmations) + } _ => None, } } diff --git a/crates/services/p2p/src/p2p_service.rs b/crates/services/p2p/src/p2p_service.rs index 97ff8a848e6..798b491a4cc 100644 --- a/crates/services/p2p/src/p2p_service.rs +++ b/crates/services/p2p/src/p2p_service.rs @@ -910,10 +910,14 @@ mod tests { config::Config, gossipsub::{ messages::{ + GossipTopicTag, GossipsubBroadcastRequest, GossipsubMessage, }, - topics::NEW_TX_GOSSIP_TOPIC, + topics::{ + NEW_TX_GOSSIP_TOPIC, + TX_CONFIRMATIONS_GOSSIP_TOPIC, + }, }, p2p_service::FuelP2PEvent, peer_manager::PeerInfo, @@ -945,6 +949,7 @@ mod tests { GossipsubMessageAcceptance, NetworkableTransactionPool, Transactions, + TxConfirmations, }, }; use futures::{ @@ -1482,7 +1487,11 @@ mod tests { #[tokio::test] #[instrument] - async fn gossipsub_broadcast_tx_with_accept() { + async fn gossipsub_broadcast_tx_with_accept__new_tx() { + tracing_subscriber::fmt() + .with_max_level(tracing::Level::INFO) + .with_target(false) + .init(); for _ in 0..100 { tokio::time::timeout( Duration::from_secs(5), @@ -1499,6 +1508,29 @@ mod tests { } } + #[tokio::test] + #[instrument] + async fn gossipsub_broadcast_tx_with_accept__tx_confirmations() { + tracing_subscriber::fmt() + .with_max_level(tracing::Level::INFO) + .with_target(false) + .init(); + for _ in 0..100 { 
+ tokio::time::timeout( + Duration::from_secs(20), + gossipsub_broadcast( + GossipsubBroadcastRequest::Confirmations(Arc::new( + TxConfirmations::default_test_tx(), + )), + GossipsubMessageAcceptance::Accept, + None, + ), + ) + .await + .unwrap(); + } + } + #[tokio::test] #[instrument] async fn gossipsub_broadcast_tx_with_reject() { @@ -1650,16 +1682,23 @@ mod tests { p2p_config.max_gossipsub_peers_connected = connection_limit; } - let selected_topic: Sha256Topic = { - let topic = match broadcast_request { - GossipsubBroadcastRequest::NewTx(_) => NEW_TX_GOSSIP_TOPIC, - GossipsubBroadcastRequest::Confirmations(_) => { - unimplemented!() + let (selected_topic, selected_tag): (Sha256Topic, GossipTopicTag) = { + let (topic, tag) = match broadcast_request { + GossipsubBroadcastRequest::NewTx(_) => { + (NEW_TX_GOSSIP_TOPIC, GossipTopicTag::NewTx) } + GossipsubBroadcastRequest::Confirmations(_) => ( + TX_CONFIRMATIONS_GOSSIP_TOPIC, + GossipTopicTag::TxConfirmations, + ), }; - Topic::new(format!("{}/{}", topic, p2p_config.network_name)) + ( + Topic::new(format!("{}/{}", topic, p2p_config.network_name)), + tag, + ) }; + tracing::info!("Selected Topic: {:?}", selected_topic); let mut message_sent = false; @@ -1693,17 +1732,26 @@ mod tests { tokio::select! { node_a_event = node_a.next_event() => { - if let Some(FuelP2PEvent::NewSubscription { peer_id, .. }) = &node_a_event { - if peer_id == &node_b.local_peer_id { - a_connected_to_b = true; + if let Some(FuelP2PEvent::NewSubscription { peer_id, tag }) = &node_a_event { + if tag != &selected_tag { + tracing::info!("Wrong tag, expected: {:?}, actual: {:?}", selected_tag, tag); + } else { + if peer_id == &node_b.local_peer_id { + a_connected_to_b = true; + } } } tracing::info!("Node A Event: {:?}", node_a_event); }, node_b_event = node_b.next_event() => { - if let Some(FuelP2PEvent::NewSubscription { peer_id, .. 
}) = &node_b_event { - if peer_id == &node_c.local_peer_id { - b_connected_to_c = true; + if let Some(FuelP2PEvent::NewSubscription { peer_id,tag, }) = &node_b_event { + tracing::info!("New subscription for peer_id: {:?} with tag: {:?}", peer_id, tag); + if tag != &selected_tag { + tracing::info!("Wrong tag, expected: {:?}, actual: {:?}", selected_tag, tag); + } else { + if peer_id == &node_c.local_peer_id { + b_connected_to_c = true; + } } } diff --git a/crates/types/src/services/p2p.rs b/crates/types/src/services/p2p.rs index 67a9e564d79..48272d7cb31 100644 --- a/crates/types/src/services/p2p.rs +++ b/crates/types/src/services/p2p.rs @@ -82,6 +82,17 @@ pub struct TxConfirmations { txs: Vec, } +#[cfg(feature = "test-helpers")] +impl TxConfirmations { + /// Test helper for creating arbitrary, meaningless `TxConfirmations` data + pub fn default_test_tx() -> Self { + Self { + signature: "Not a real signature".to_string(), + txs: vec![Transaction::default_test_tx()], + } + } +} + #[derive(Default, Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] /// The source of some network data. 
pub struct SourcePeer { From bbd92e4fe0a22fd4cb50645eb42e6d494512da92 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 19 Feb 2025 09:55:47 -0700 Subject: [PATCH 03/20] Get test passing --- crates/services/p2p/src/p2p_service.rs | 10 +++++++--- crates/types/src/services/p2p.rs | 2 +- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/crates/services/p2p/src/p2p_service.rs b/crates/services/p2p/src/p2p_service.rs index 798b491a4cc..91ede4683b9 100644 --- a/crates/services/p2p/src/p2p_service.rs +++ b/crates/services/p2p/src/p2p_service.rs @@ -1774,9 +1774,13 @@ mod tests { panic!("Wrong GossipsubMessage") } } - GossipsubMessage::Confirmations(_) => { - unimplemented!() - }} + GossipsubMessage::Confirmations(confirmations) => { + if confirmations != &TxConfirmations::default_test_tx() { + tracing::error!("Wrong p2p message {:?}", message); + panic!("Wrong GossipsubMessage") + } + } + } // Node B received the correct message // If we try to publish it again we will get `PublishError::Duplicate` diff --git a/crates/types/src/services/p2p.rs b/crates/types/src/services/p2p.rs index 48272d7cb31..041ee754b42 100644 --- a/crates/types/src/services/p2p.rs +++ b/crates/types/src/services/p2p.rs @@ -76,7 +76,7 @@ pub type TransactionGossipData = GossipData; pub type ConfirmationsGossipData = GossipData; /// List of transactions that have been confirmed with block producer's signature -#[derive(Serialize, Deserialize, Debug, Clone)] +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] pub struct TxConfirmations { signature: String, txs: Vec, From 943205702d8c0b778a613e8ccbce14fe648d544a Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 19 Feb 2025 12:00:00 -0700 Subject: [PATCH 04/20] Cleanup test, make more robust --- crates/services/p2p/src/p2p_service.rs | 39 +++++++++++++++----------- 1 file changed, 23 insertions(+), 16 deletions(-) diff --git a/crates/services/p2p/src/p2p_service.rs b/crates/services/p2p/src/p2p_service.rs index 
91ede4683b9..99f3d85d627 100644 --- a/crates/services/p2p/src/p2p_service.rs +++ b/crates/services/p2p/src/p2p_service.rs @@ -901,6 +901,8 @@ impl FuelP2PService { #[allow(clippy::cast_possible_truncation)] #[cfg(test)] mod tests { + #![allow(non_snake_case)] + use super::{ FuelP2PService, PublishError, @@ -972,7 +974,10 @@ mod tests { use rand::Rng; use std::{ collections::HashSet, - ops::Range, + ops::{ + Deref, + Range, + }, sync::Arc, time::Duration, }; @@ -1766,21 +1771,7 @@ mod tests { panic!("Wrong Topic"); } - // received value should match sent value - match &message { - GossipsubMessage::NewTx(tx) => { - if tx != &Transaction::default_test_tx() { - tracing::error!("Wrong p2p message {:?}", message); - panic!("Wrong GossipsubMessage") - } - } - GossipsubMessage::Confirmations(confirmations) => { - if confirmations != &TxConfirmations::default_test_tx() { - tracing::error!("Wrong p2p message {:?}", message); - panic!("Wrong GossipsubMessage") - } - } - } + check_message_matches_request(&message, &broadcast_request); // Node B received the correct message // If we try to publish it again we will get `PublishError::Duplicate` @@ -1819,6 +1810,22 @@ mod tests { } } + fn check_message_matches_request( + message: &GossipsubMessage, + expected: &GossipsubBroadcastRequest, + ) { + match (message, expected) { + (GossipsubMessage::NewTx(received), GossipsubBroadcastRequest::NewTx(requested)) => { + assert_eq!(requested.deref(), received, "Both messages were `NewTx`s, but the received message did not match the requested message"); + } + ( + GossipsubMessage::Confirmations(received), + GossipsubBroadcastRequest::Confirmations(requested), + ) => assert_eq!(requested.deref(), received, "Both messages were `Confirmations`, but the received message did not match the requested message"), + _ => panic!("Message does not match the expected request, expected: {:?}, actual: {:?}", expected, message), + } + } + fn arbitrary_headers_for_range(range: Range) -> Vec { let mut 
blocks = Vec::new(); for i in range { From 2da50de8df2c333dc604a702d740b8e38427c561 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 19 Feb 2025 12:10:27 -0700 Subject: [PATCH 05/20] Add reject test --- crates/services/p2p/src/p2p_service.rs | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/crates/services/p2p/src/p2p_service.rs b/crates/services/p2p/src/p2p_service.rs index 99f3d85d627..75dcdbe8f36 100644 --- a/crates/services/p2p/src/p2p_service.rs +++ b/crates/services/p2p/src/p2p_service.rs @@ -1538,7 +1538,7 @@ mod tests { #[tokio::test] #[instrument] - async fn gossipsub_broadcast_tx_with_reject() { + async fn gossipsub_broadcast_tx_with_reject__new_tx() { for _ in 0..100 { tokio::time::timeout( Duration::from_secs(5), @@ -1555,6 +1555,25 @@ mod tests { } } + #[tokio::test] + #[instrument] + async fn gossipsub_broadcast_tx_with_reject__tx_confirmations() { + for _ in 0..100 { + tokio::time::timeout( + Duration::from_secs(5), + gossipsub_broadcast( + GossipsubBroadcastRequest::Confirmations(Arc::new( + TxConfirmations::default_test_tx(), + )), + GossipsubMessageAcceptance::Reject, + None, + ), + ) + .await + .unwrap(); + } + } + #[tokio::test] #[instrument] #[ignore] From b5cc5dcb629a806e9e49f3614c0fa65cc2d69c00 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 19 Feb 2025 13:06:15 -0700 Subject: [PATCH 06/20] Add test for p2p task for broadcasting the message --- crates/services/p2p/src/service.rs | 86 ++++++++++++++++++++++++++++-- 1 file changed, 82 insertions(+), 4 deletions(-) diff --git a/crates/services/p2p/src/service.rs b/crates/services/p2p/src/service.rs index e373aa846b2..1b70e704d99 100644 --- a/crates/services/p2p/src/service.rs +++ b/crates/services/p2p/src/service.rs @@ -1022,7 +1022,7 @@ where let _ = self.broadcast.block_height_broadcast(block_height_data); } Some(FuelP2PEvent::GossipsubMessage { message, message_id, peer_id,.. 
}) => { - + tracing::info!("Received gossip message from peer {:?}", peer_id); self.broadcast_gossip_message(message, message_id, peer_id); }, Some(FuelP2PEvent::InboundRequestMessage { request_message, request_id }) => { @@ -1441,7 +1441,10 @@ pub mod tests { use super::*; - use crate::peer_manager::heartbeat_data::HeartbeatData; + use crate::{ + gossipsub::topics::TX_CONFIRMATIONS_GOSSIP_TOPIC, + peer_manager::heartbeat_data::HeartbeatData, + }; use fuel_core_services::{ Service, State, @@ -1450,8 +1453,10 @@ pub mod tests { use fuel_core_types::{ blockchain::consensus::Genesis, fuel_types::BlockHeight, + services::p2p::TxConfirmations, }; use futures::FutureExt; + use libp2p::gossipsub::TopicHash; use std::{ collections::VecDeque, time::SystemTime, @@ -1641,6 +1646,7 @@ pub mod tests { struct FakeBroadcast { pub peer_reports: mpsc::Sender<(FuelPeerId, AppScore, String)>, + pub confirmation_gossip_broadcast: mpsc::Sender, } impl Broadcast for FakeBroadcast { @@ -1674,9 +1680,10 @@ pub mod tests { fn confirmations_broadcast( &self, - _confirmations: ConfirmationsGossipData, + confirmations: ConfirmationsGossipData, ) -> anyhow::Result<()> { - todo!() + self.confirmation_gossip_broadcast.try_send(confirmations)?; + Ok(()) } fn new_tx_subscription_broadcast( @@ -1719,6 +1726,7 @@ pub mod tests { let (report_sender, mut report_receiver) = mpsc::channel(100); let broadcast = FakeBroadcast { peer_reports: report_sender, + confirmation_gossip_broadcast: mpsc::channel(100).0, }; // Less than actual @@ -1812,6 +1820,7 @@ pub mod tests { let (report_sender, mut report_receiver) = mpsc::channel(100); let broadcast = FakeBroadcast { peer_reports: report_sender, + confirmation_gossip_broadcast: mpsc::channel(100).0, }; // Greater than actual @@ -1903,6 +1912,7 @@ pub mod tests { let (request_sender, request_receiver) = mpsc::channel(100); let broadcast = FakeBroadcast { peer_reports: mpsc::channel(100).0, + confirmation_gossip_broadcast: mpsc::channel(100).0, }; let mut task 
= Task { chain_id: Default::default(), @@ -1938,4 +1948,72 @@ pub mod tests { .expect("Should process the block height even under p2p pressure"); } } + + fn arb_tx_confirmation_gossip_message() -> FuelP2PEvent { + let peer_id = PeerId::random(); + let message_id = vec![1, 2, 3, 4, 5].into(); + let topic_hash = TopicHash::from_raw(TX_CONFIRMATIONS_GOSSIP_TOPIC); + let confirmations = TxConfirmations::default_test_tx(); + let message = GossipsubMessage::Confirmations(confirmations); + FuelP2PEvent::GossipsubMessage { + peer_id, + message_id, + topic_hash, + message, + } + } + + #[tokio::test] + async fn run__gossip_message_from_p2p_service_is_broadcasted__tx_confirmations() { + // given + let gossip_message_event = arb_tx_confirmation_gossip_message(); + let events = vec![gossip_message_event.clone()]; + let event_stream = futures::stream::iter(events); + let p2p_service = FakeP2PService { + peer_info: vec![], + next_event_stream: Box::pin(event_stream), + }; + let (confirmations_sender, mut confirmations_receiver) = mpsc::channel(100); + let broadcast = FakeBroadcast { + peer_reports: mpsc::channel(100).0, + confirmation_gossip_broadcast: confirmations_sender, + }; + let (request_sender, request_receiver) = mpsc::channel(100); + let mut task = Task { + chain_id: Default::default(), + response_timeout: Default::default(), + p2p_service, + view_provider: FakeDB, + next_block_height: FakeBlockImporter.next_block_height(), + tx_pool: FakeTxPool, + request_receiver, + request_sender, + db_heavy_task_processor: SyncProcessor::new("Test", 1, 1).unwrap(), + tx_pool_heavy_task_processor: AsyncProcessor::new("Test", 1, 1).unwrap(), + broadcast, + max_headers_per_request: 0, + max_txs_per_request: 100, + heartbeat_check_interval: Duration::from_secs(0), + heartbeat_max_avg_interval: Default::default(), + heartbeat_max_time_since_last: Default::default(), + next_check_time: Instant::now(), + heartbeat_peer_reputation_config: Default::default(), + cached_view: 
Arc::new(CachedView::new(100, false)), + }; + + // when + let mut watcher = StateWatcher::started(); + task.run(&mut watcher).await; + tokio::time::sleep(Duration::from_millis(100)).await; + + // then + let actual = confirmations_receiver.try_recv().unwrap().data.unwrap(); + let FuelP2PEvent::GossipsubMessage { message, .. } = gossip_message_event else { + panic!("Expected GossipsubMessage event"); + }; + let GossipsubMessage::Confirmations(expected) = message else { + panic!("Expected Confirmations message"); + }; + assert_eq!(expected, actual); + } } From 30e66cd5835b7bb64a849b7a993bbaeb36440bd9 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 19 Feb 2025 15:05:18 -0700 Subject: [PATCH 07/20] Add test for broadcast impl of shared state, refactor --- crates/services/p2p/src/p2p_service.rs | 1410 +---------------- crates/services/p2p/src/p2p_service/tests.rs | 1388 ++++++++++++++++ crates/services/p2p/src/service.rs | 605 +------ .../p2p/src/service/broadcast_tests.rs | 32 + crates/services/p2p/src/service/task_tests.rs | 571 +++++++ 5 files changed, 2013 insertions(+), 1993 deletions(-) create mode 100644 crates/services/p2p/src/p2p_service/tests.rs create mode 100644 crates/services/p2p/src/service/broadcast_tests.rs create mode 100644 crates/services/p2p/src/service/task_tests.rs diff --git a/crates/services/p2p/src/p2p_service.rs b/crates/services/p2p/src/p2p_service.rs index 75dcdbe8f36..02c6c255d28 100644 --- a/crates/services/p2p/src/p2p_service.rs +++ b/crates/services/p2p/src/p2p_service.rs @@ -85,6 +85,9 @@ use tracing::{ warn, }; +#[cfg(test)] +mod tests; + /// Maximum amount of peer's addresses that we are ready to store per peer const MAX_IDENTIFY_ADDRESSES: usize = 10; @@ -897,1410 +900,3 @@ impl FuelP2PService { }) } } - -#[allow(clippy::cast_possible_truncation)] -#[cfg(test)] -mod tests { - #![allow(non_snake_case)] - - use super::{ - FuelP2PService, - PublishError, - }; - use crate::{ - codecs::postcard::PostcardCodec, - config::Config, - 
gossipsub::{ - messages::{ - GossipTopicTag, - GossipsubBroadcastRequest, - GossipsubMessage, - }, - topics::{ - NEW_TX_GOSSIP_TOPIC, - TX_CONFIRMATIONS_GOSSIP_TOPIC, - }, - }, - p2p_service::FuelP2PEvent, - peer_manager::PeerInfo, - request_response::messages::{ - RequestMessage, - ResponseError, - ResponseSender, - V2ResponseMessage, - }, - service::to_message_acceptance, - }; - use fuel_core_types::{ - blockchain::{ - consensus::{ - poa::PoAConsensus, - Consensus, - }, - header::BlockHeader, - SealedBlockHeader, - }, - fuel_tx::{ - Transaction, - TransactionBuilder, - TxId, - UniqueIdentifier, - }, - fuel_types::ChainId, - services::p2p::{ - GossipsubMessageAcceptance, - NetworkableTransactionPool, - Transactions, - TxConfirmations, - }, - }; - use futures::{ - future::join_all, - StreamExt, - }; - use libp2p::{ - gossipsub::{ - Sha256Topic, - Topic, - }, - identity::Keypair, - swarm::{ - ListenError, - SwarmEvent, - }, - Multiaddr, - PeerId, - }; - use rand::Rng; - use std::{ - collections::HashSet, - ops::{ - Deref, - Range, - }, - sync::Arc, - time::Duration, - }; - use tokio::sync::{ - broadcast, - mpsc, - oneshot, - watch, - }; - use tracing_attributes::instrument; - - type P2PService = FuelP2PService; - - /// helper function for building FuelP2PService - async fn build_service_from_config(mut p2p_config: Config) -> P2PService { - p2p_config.keypair = Keypair::generate_secp256k1(); // change keypair for each Node - let max_block_size = p2p_config.max_block_size; - let (sender, _) = - broadcast::channel(p2p_config.reserved_nodes.len().saturating_add(1)); - - let mut service = - FuelP2PService::new(sender, p2p_config, PostcardCodec::new(max_block_size)) - .await - .unwrap(); - service.start().await.unwrap(); - service - } - - async fn setup_bootstrap_nodes( - p2p_config: &Config, - bootstrap_nodes_count: usize, - ) -> (Vec, Vec) { - let nodes = join_all( - (0..bootstrap_nodes_count) - .map(|_| build_service_from_config(p2p_config.clone())), - ) - .await; - 
let bootstrap_multiaddrs = nodes - .iter() - .flat_map(|b| b.multiaddrs()) - .collect::>(); - (nodes, bootstrap_multiaddrs) - } - - fn spawn(stop: &watch::Sender<()>, mut node: P2PService) { - let mut stop = stop.subscribe(); - tokio::spawn(async move { - loop { - tokio::select! { - _ = node.next_event() => {} - _ = stop.changed() => { - break; - } - } - } - }); - } - - #[tokio::test] - #[instrument] - async fn p2p_service_works() { - build_service_from_config(Config::default_initialized("p2p_service_works")).await; - } - - // Single sentry node connects to multiple reserved nodes and `max_peers_allowed` amount of non-reserved nodes. - // It also tries to dial extra non-reserved nodes to establish the connection. - // A single reserved node is not started immediately with the rest of the nodes. - // Once sentry node establishes the connection with the allowed number of nodes - // we start the reserved node, and await for it to establish the connection. - // This test proves that there is always an available slot for the reserved node to connect to. 
- #[tokio::test(flavor = "multi_thread")] - #[instrument] - async fn reserved_nodes_reconnect_works() { - let p2p_config = Config::default_initialized("reserved_nodes_reconnect_works"); - - // total amount will be `max_peers_allowed` + `reserved_nodes.len()` - let max_peers_allowed: usize = 3; - - let (bootstrap_nodes, bootstrap_multiaddrs) = - setup_bootstrap_nodes(&p2p_config, max_peers_allowed.saturating_mul(5)).await; - let (mut reserved_nodes, reserved_multiaddrs) = - setup_bootstrap_nodes(&p2p_config, max_peers_allowed).await; - - let mut sentry_node = { - let mut p2p_config = p2p_config.clone(); - p2p_config.max_discovery_peers_connected = max_peers_allowed as u32; - - p2p_config.bootstrap_nodes = bootstrap_multiaddrs; - - p2p_config.reserved_nodes = reserved_multiaddrs; - - build_service_from_config(p2p_config).await - }; - - // pop() a single reserved node, so it's not run with the rest of the nodes - let mut reserved_node = reserved_nodes.pop(); - let reserved_node_peer_id = reserved_node.as_ref().unwrap().local_peer_id; - - let all_node_services: Vec<_> = bootstrap_nodes - .into_iter() - .chain(reserved_nodes.into_iter()) - .collect(); - - let mut all_nodes_ids: Vec = all_node_services - .iter() - .map(|service| service.local_peer_id) - .collect(); - - let (stop_sender, _) = watch::channel(()); - all_node_services.into_iter().for_each(|node| { - spawn(&stop_sender, node); - }); - - loop { - tokio::select! 
{ - sentry_node_event = sentry_node.next_event() => { - // we've connected to all other peers - if sentry_node.peer_manager.total_peers_connected() > max_peers_allowed { - // if the `reserved_node` is not included, - // create and insert it, to be polled with rest of the nodes - if !all_nodes_ids - .iter() - .any(|local_peer_id| local_peer_id == &reserved_node_peer_id) { - if let Some(node) = reserved_node { - all_nodes_ids.push(node.local_peer_id); - spawn(&stop_sender, node); - reserved_node = None; - } - } - } - if let Some(FuelP2PEvent::PeerConnected(peer_id)) = sentry_node_event { - // we connected to the desired reserved node - if peer_id == reserved_node_peer_id { - break - } - } - }, - } - } - stop_sender.send(()).unwrap(); - } - - #[tokio::test] - #[instrument] - async fn dont_connect_to_node_with_same_peer_id() { - let mut p2p_config = - Config::default_initialized("dont_connect_to_node_with_same_peer_id"); - let mut node_a = build_service_from_config(p2p_config.clone()).await; - // We don't use build_service_from_config here, because we want to use the same keypair - // to have the same PeerId - let node_b = { - // Given - p2p_config.reserved_nodes = node_a.multiaddrs(); - let max_block_size = p2p_config.max_block_size; - let (sender, _) = - broadcast::channel(p2p_config.reserved_nodes.len().saturating_add(1)); - - let mut service = FuelP2PService::new( - sender, - p2p_config, - PostcardCodec::new(max_block_size), - ) - .await - .unwrap(); - service.start().await.unwrap(); - service - }; - // When - tokio::time::timeout(Duration::from_secs(5), async move { - loop { - let event = node_a.next_event().await; - if let Some(FuelP2PEvent::PeerConnected(_)) = event { - panic!("Node B should not connect to Node A because they have the same PeerId"); - } - assert_eq!(node_a.peer_manager().total_peers_connected(), 0); - } - }) - .await - // Then - .expect_err("The node should not connect to itself"); - assert_eq!(node_b.peer_manager().total_peers_connected(), 0); 
- } - - // We start with two nodes, node_a and node_b, bootstrapped with `bootstrap_nodes_count` other nodes. - // Yet node_a and node_b are only allowed to connect to specified amount of nodes. - #[tokio::test] - #[instrument] - async fn max_peers_connected_works() { - let p2p_config = Config::default_initialized("max_peers_connected_works"); - - let bootstrap_nodes_count = 20; - let node_a_max_peers_allowed: usize = 3; - let node_b_max_peers_allowed: usize = 5; - - let (mut nodes, nodes_multiaddrs) = - setup_bootstrap_nodes(&p2p_config, bootstrap_nodes_count).await; - - // this node is allowed to only connect to `node_a_max_peers_allowed` other nodes - let mut node_a = { - let mut p2p_config = p2p_config.clone(); - p2p_config.max_discovery_peers_connected = node_a_max_peers_allowed as u32; - // it still tries to dial all nodes! - p2p_config.bootstrap_nodes.clone_from(&nodes_multiaddrs); - - build_service_from_config(p2p_config).await - }; - - // this node is allowed to only connect to `node_b_max_peers_allowed` other nodes - let mut node_b = { - let mut p2p_config = p2p_config.clone(); - p2p_config.max_discovery_peers_connected = node_b_max_peers_allowed as u32; - // it still tries to dial all nodes! - p2p_config.bootstrap_nodes.clone_from(&nodes_multiaddrs); - - build_service_from_config(p2p_config).await - }; - - let (tx, mut rx) = tokio::sync::oneshot::channel::<()>(); - let jh = tokio::spawn(async move { - while rx.try_recv().is_err() { - futures::stream::iter(nodes.iter_mut()) - .for_each_concurrent(4, |node| async move { - node.next_event().await; - }) - .await; - } - }); - - let mut node_a_hit_limit = false; - let mut node_b_hit_limit = false; - let mut instance = tokio::time::Instant::now(); - - // After we hit limit for node_a and node_b start timer. - // If we don't exceed the limit during 5 seconds, finish the test successfully. - while instance.elapsed().as_secs() < 5 { - tokio::select! 
{ - event_from_node_a = node_a.next_event() => { - if let Some(FuelP2PEvent::PeerConnected(_)) = event_from_node_a { - if node_a.peer_manager().total_peers_connected() > node_a_max_peers_allowed { - panic!("The node should only connect to max {node_a_max_peers_allowed} peers"); - } - node_a_hit_limit |= node_a.peer_manager().total_peers_connected() == node_a_max_peers_allowed; - } - tracing::info!("Event from the node_a: {:?}", event_from_node_a); - }, - event_from_node_b = node_b.next_event() => { - if let Some(FuelP2PEvent::PeerConnected(_)) = event_from_node_b { - if node_b.peer_manager().total_peers_connected() > node_b_max_peers_allowed { - panic!("The node should only connect to max {node_b_max_peers_allowed} peers"); - } - node_b_hit_limit |= node_b.peer_manager().total_peers_connected() == node_b_max_peers_allowed; - } - tracing::info!("Event from the node_b: {:?}", event_from_node_b); - }, - } - - if !(node_a_hit_limit && node_b_hit_limit) { - instance = tokio::time::Instant::now(); - } - } - - tx.send(()).unwrap(); - jh.await.unwrap() - } - - // Simulate 2 Sets of Sentry nodes. - // In both Sets, a single Guarded Node should only be connected to their sentry nodes. - // While other nodes can and should connect to nodes outside of the Sentry Set. 
- #[tokio::test(flavor = "multi_thread")] - #[instrument] - async fn sentry_nodes_working() { - const RESERVED_NODE_SIZE: usize = 4; - - let mut p2p_config = Config::default_initialized("sentry_nodes_working"); - - async fn build_sentry_nodes(p2p_config: Config) -> (P2PService, Vec) { - let (reserved_nodes, reserved_multiaddrs) = - setup_bootstrap_nodes(&p2p_config, RESERVED_NODE_SIZE).await; - - // set up the guraded node service with `reserved_nodes_only_mode` - let guarded_node_service = { - let mut p2p_config = p2p_config.clone(); - p2p_config.reserved_nodes = reserved_multiaddrs; - p2p_config.reserved_nodes_only_mode = true; - build_service_from_config(p2p_config).await - }; - - let sentry_nodes = reserved_nodes; - - (guarded_node_service, sentry_nodes) - } - - let (mut first_guarded_node, mut first_sentry_nodes) = - build_sentry_nodes(p2p_config.clone()).await; - p2p_config.bootstrap_nodes = first_sentry_nodes - .iter() - .flat_map(|n| n.multiaddrs()) - .collect(); - - let (mut second_guarded_node, second_sentry_nodes) = - build_sentry_nodes(p2p_config).await; - - let first_sentry_set: HashSet<_> = first_sentry_nodes - .iter() - .map(|node| node.local_peer_id) - .collect(); - - let second_sentry_set: HashSet<_> = second_sentry_nodes - .iter() - .map(|node| node.local_peer_id) - .collect(); - - let mut single_sentry_node = first_sentry_nodes.pop().unwrap(); - let mut sentry_node_connections = HashSet::new(); - let (stop_sender, _) = watch::channel(()); - first_sentry_nodes - .into_iter() - .chain(second_sentry_nodes.into_iter()) - .for_each(|node| { - spawn(&stop_sender, node); - }); - - let mut instance = tokio::time::Instant::now(); - // After guards are connected to all sentries and at least one sentry has - // more connections than sentries in the group, start the timer.. - // If guards don't connected to new nodes during 5 seconds, finish the test successfully. - while instance.elapsed().as_secs() < 5 { - tokio::select! 
{ - event_from_first_guarded = first_guarded_node.next_event() => { - if let Some(FuelP2PEvent::PeerConnected(peer_id)) = event_from_first_guarded { - if !first_sentry_set.contains(&peer_id) { - panic!("The node should only connect to the specified reserved nodes!"); - } - } - tracing::info!("Event from the first guarded node: {:?}", event_from_first_guarded); - }, - event_from_second_guarded = second_guarded_node.next_event() => { - if let Some(FuelP2PEvent::PeerConnected(peer_id)) = event_from_second_guarded { - if !second_sentry_set.contains(&peer_id) { - panic!("The node should only connect to the specified reserved nodes!"); - } - } - tracing::info!("Event from the second guarded node: {:?}", event_from_second_guarded); - }, - // Poll one of the reserved, sentry nodes - sentry_node_event = single_sentry_node.next_event() => { - if let Some(FuelP2PEvent::PeerConnected(peer_id)) = sentry_node_event { - sentry_node_connections.insert(peer_id); - } - } - }; - - // This reserved node has connected to more than the number of reserved nodes it is part of. - // It means it has discovered other nodes in the network. - if sentry_node_connections.len() < 2 * RESERVED_NODE_SIZE { - instance = tokio::time::Instant::now(); - } - } - stop_sender.send(()).unwrap(); - } - - // Simulates 2 p2p nodes that are on the same network and should connect via mDNS - // without any additional bootstrapping - #[tokio::test] - #[instrument] - async fn nodes_connected_via_mdns() { - // Node A - let mut p2p_config = Config::default_initialized("nodes_connected_via_mdns"); - p2p_config.enable_mdns = true; - let mut node_a = build_service_from_config(p2p_config.clone()).await; - - // Node B - let mut node_b = build_service_from_config(p2p_config).await; - - loop { - tokio::select! 
{ - node_b_event = node_b.next_event() => { - if let Some(FuelP2PEvent::PeerConnected(_)) = node_b_event { - // successfully connected to Node A - break - } - tracing::info!("Node B Event: {:?}", node_b_event); - }, - _ = node_a.swarm.select_next_some() => {}, - }; - } - } - - // Simulates 2 p2p nodes that are on the same network but their Fuel Upgrade checksum is different - // (different chain id or chain config) - // So they are not able to connect - #[tokio::test] - #[instrument] - async fn nodes_cannot_connect_due_to_different_checksum() { - use libp2p::TransportError; - // Node A - let mut p2p_config = - Config::default_initialized("nodes_cannot_connect_due_to_different_checksum"); - let mut node_a = build_service_from_config(p2p_config.clone()).await; - - // different checksum - p2p_config.checksum = [1u8; 32].into(); - p2p_config.bootstrap_nodes = node_a.multiaddrs(); - // Node B - let mut node_b = build_service_from_config(p2p_config).await; - - loop { - tokio::select! { - node_a_event = node_a.swarm.select_next_some() => { - tracing::info!("Node A Event: {:?}", node_a_event); - if let SwarmEvent::IncomingConnectionError { error: ListenError::Transport(TransportError::Other(_)), .. 
} = node_a_event { - break - } - }, - node_b_event = node_b.next_event() => { - if let Some(FuelP2PEvent::PeerConnected(_)) = node_b_event { - panic!("Node B should not connect to Node A!") - } - tracing::info!("Node B Event: {:?}", node_b_event); - }, - - }; - } - } - - // Simulates 3 p2p nodes, Node B & Node C are bootstrapped with Node A - // Using Identify Protocol Node C should be able to identify and connect to Node B - #[tokio::test] - #[instrument] - async fn nodes_connected_via_identify() { - // Node A - let mut p2p_config = Config::default_initialized("nodes_connected_via_identify"); - - let mut node_a = build_service_from_config(p2p_config.clone()).await; - - // Node B - p2p_config.bootstrap_nodes = node_a.multiaddrs(); - let mut node_b = build_service_from_config(p2p_config.clone()).await; - - // Node C - let mut node_c = build_service_from_config(p2p_config).await; - - loop { - tokio::select! { - node_a_event = node_a.next_event() => { - tracing::info!("Node A Event: {:?}", node_a_event); - }, - node_b_event = node_b.next_event() => { - tracing::info!("Node B Event: {:?}", node_b_event); - }, - - node_c_event = node_c.next_event() => { - if let Some(FuelP2PEvent::PeerConnected(peer_id)) = node_c_event { - // we have connected to Node B! 
- if peer_id == node_b.local_peer_id { - break - } - } - - tracing::info!("Node C Event: {:?}", node_c_event); - } - }; - } - } - - // Simulates 2 p2p nodes that connect to each other and consequently exchange Peer Info - // On successful connection, node B updates its latest BlockHeight - // and shares it with Peer A via Heartbeat protocol - #[tokio::test] - #[instrument] - async fn peer_info_updates_work() { - let mut p2p_config = Config::default_initialized("peer_info_updates_work"); - - // Node A - let mut node_a = build_service_from_config(p2p_config.clone()).await; - - // Node B - p2p_config.bootstrap_nodes = node_a.multiaddrs(); - let mut node_b = build_service_from_config(p2p_config).await; - - let latest_block_height = 40_u32.into(); - - loop { - tokio::select! { - node_a_event = node_a.next_event() => { - if let Some(FuelP2PEvent::PeerInfoUpdated { peer_id, block_height: _ }) = node_a_event { - if let Some(PeerInfo { heartbeat_data, client_version, .. }) = node_a.peer_manager.get_peer_info(&peer_id) { - // Exits after it verifies that: - // 1. Peer Addresses are known - // 2. Client Version is known - // 3. 
Node has responded with their latest BlockHeight - if client_version.is_some() && heartbeat_data.block_height == Some(latest_block_height) { - break; - } - } - } - - tracing::info!("Node A Event: {:?}", node_a_event); - }, - node_b_event = node_b.next_event() => { - if let Some(FuelP2PEvent::PeerConnected(_)) = node_b_event { - // we've connected to Peer A - // let's update our BlockHeight - node_b.update_block_height(latest_block_height); - } - - tracing::info!("Node B Event: {:?}", node_b_event); - } - } - } - } - - #[tokio::test] - #[instrument] - async fn gossipsub_broadcast_tx_with_accept__new_tx() { - tracing_subscriber::fmt() - .with_max_level(tracing::Level::INFO) - .with_target(false) - .init(); - for _ in 0..100 { - tokio::time::timeout( - Duration::from_secs(5), - gossipsub_broadcast( - GossipsubBroadcastRequest::NewTx(Arc::new( - Transaction::default_test_tx(), - )), - GossipsubMessageAcceptance::Accept, - None, - ), - ) - .await - .unwrap(); - } - } - - #[tokio::test] - #[instrument] - async fn gossipsub_broadcast_tx_with_accept__tx_confirmations() { - tracing_subscriber::fmt() - .with_max_level(tracing::Level::INFO) - .with_target(false) - .init(); - for _ in 0..100 { - tokio::time::timeout( - Duration::from_secs(20), - gossipsub_broadcast( - GossipsubBroadcastRequest::Confirmations(Arc::new( - TxConfirmations::default_test_tx(), - )), - GossipsubMessageAcceptance::Accept, - None, - ), - ) - .await - .unwrap(); - } - } - - #[tokio::test] - #[instrument] - async fn gossipsub_broadcast_tx_with_reject__new_tx() { - for _ in 0..100 { - tokio::time::timeout( - Duration::from_secs(5), - gossipsub_broadcast( - GossipsubBroadcastRequest::NewTx(Arc::new( - Transaction::default_test_tx(), - )), - GossipsubMessageAcceptance::Reject, - None, - ), - ) - .await - .unwrap(); - } - } - - #[tokio::test] - #[instrument] - async fn gossipsub_broadcast_tx_with_reject__tx_confirmations() { - for _ in 0..100 { - tokio::time::timeout( - Duration::from_secs(5), - 
gossipsub_broadcast( - GossipsubBroadcastRequest::Confirmations(Arc::new( - TxConfirmations::default_test_tx(), - )), - GossipsubMessageAcceptance::Reject, - None, - ), - ) - .await - .unwrap(); - } - } - - #[tokio::test] - #[instrument] - #[ignore] - async fn gossipsub_scoring_with_accepted_messages() { - gossipsub_scoring_tester( - "gossipsub_scoring_with_accepted_messages", - 100, - GossipsubMessageAcceptance::Accept, - ) - .await; - } - - /// At `GRAYLIST_THRESHOLD` the node will ignore all messages from the peer - /// And our PeerManager will ban the peer at that point - leading to disconnect - #[tokio::test] - #[instrument] - #[ignore] - async fn gossipsub_scoring_with_rejected_messages() { - gossipsub_scoring_tester( - "gossipsub_scoring_with_rejected_messages", - 100, - GossipsubMessageAcceptance::Reject, - ) - .await; - } - - // TODO: Move me before tests that use this function - /// Helper function for testing gossipsub scoring - /// ! Dev Note: this function runs forever, its purpose is to show the scoring in action with passage of time - async fn gossipsub_scoring_tester( - test_name: &str, - amount_of_msgs_per_second: usize, - acceptance: GossipsubMessageAcceptance, - ) { - let mut p2p_config = Config::default_initialized(test_name); - - // Node A - let mut node_a = build_service_from_config(p2p_config.clone()).await; - - // Node B - p2p_config.bootstrap_nodes = node_a.multiaddrs(); - let mut node_b = build_service_from_config(p2p_config.clone()).await; - - // Node C - p2p_config.bootstrap_nodes = node_b.multiaddrs(); - let mut node_c = build_service_from_config(p2p_config.clone()).await; - - let mut interval = tokio::time::interval(Duration::from_secs(1)); - - loop { - tokio::select! { - node_a_event = node_a.next_event() => { - if let Some(FuelP2PEvent::GossipsubMessage { message_id, peer_id, .. 
}) = node_a_event { - let msg_acceptance = to_message_acceptance(&acceptance); - node_a.report_message_validation_result(&message_id, peer_id, msg_acceptance); - } - } - node_b_event = node_b.next_event() => { - if let Some(FuelP2PEvent::GossipsubMessage { message_id, peer_id, .. }) = node_b_event { - let msg_acceptance = to_message_acceptance(&acceptance); - node_b.report_message_validation_result(&message_id, peer_id, msg_acceptance); - } - }, - node_c_event = node_c.next_event() => { - if let Some(FuelP2PEvent::GossipsubMessage { message_id, peer_id, .. }) = node_c_event { - let msg_acceptance = to_message_acceptance(&acceptance); - node_c.report_message_validation_result(&message_id, peer_id, msg_acceptance); - } - }, - _ = interval.tick() => { - let mut transactions = vec![]; - for _ in 0..amount_of_msgs_per_second { - let random_tx = - TransactionBuilder::script(rand::thread_rng().gen::<[u8; 32]>().to_vec(), rand::thread_rng().gen::<[u8; 32]>().to_vec()).finalize_as_transaction(); - - transactions.push(random_tx.clone()); - let random_tx = GossipsubBroadcastRequest::NewTx(Arc::new(random_tx)); - - match rand::thread_rng().gen_range(1..=3) { - 1 => { - // Node A sends a Transaction - let _ = node_a.publish_message(random_tx); - - }, - 2 => { - // Node B sends a Transaction - let _ = node_b.publish_message(random_tx); - - }, - 3 => { - // Node C sends a Transaction - let _ = node_c.publish_message(random_tx); - }, - _ => unreachable!("Random number generator is broken") - } - } - - eprintln!("Node A WORLD VIEW"); - eprintln!("B score: {:?}", node_a.get_peer_score(&node_b.local_peer_id).unwrap()); - eprintln!("C score: {:?}", node_a.get_peer_score(&node_c.local_peer_id).unwrap()); - eprintln!(); - - eprintln!("Node B WORLD VIEW"); - eprintln!("A score: {:?}", node_b.get_peer_score(&node_a.local_peer_id).unwrap()); - eprintln!("C score: {:?}", node_b.get_peer_score(&node_c.local_peer_id).unwrap()); - eprintln!(); - - eprintln!("Node C WORLD VIEW"); - eprintln!("A 
score: {:?}", node_c.get_peer_score(&node_a.local_peer_id).unwrap()); - eprintln!("B score: {:?}", node_c.get_peer_score(&node_b.local_peer_id).unwrap()); - eprintln!(); - - // never ending loop - // break; - } - } - } - } - - // TODO: Move me before tests that use this function - /// Reusable helper function for Broadcasting Gossipsub requests - async fn gossipsub_broadcast( - broadcast_request: GossipsubBroadcastRequest, - acceptance: GossipsubMessageAcceptance, - connection_limit: Option, - ) { - let mut p2p_config = Config::default_initialized("gossipsub_exchanges_messages"); - - if let Some(connection_limit) = connection_limit { - p2p_config.max_gossipsub_peers_connected = connection_limit; - } - - let (selected_topic, selected_tag): (Sha256Topic, GossipTopicTag) = { - let (topic, tag) = match broadcast_request { - GossipsubBroadcastRequest::NewTx(_) => { - (NEW_TX_GOSSIP_TOPIC, GossipTopicTag::NewTx) - } - GossipsubBroadcastRequest::Confirmations(_) => ( - TX_CONFIRMATIONS_GOSSIP_TOPIC, - GossipTopicTag::TxConfirmations, - ), - }; - - ( - Topic::new(format!("{}/{}", topic, p2p_config.network_name)), - tag, - ) - }; - tracing::info!("Selected Topic: {:?}", selected_topic); - - let mut message_sent = false; - - // Node A - let mut node_a = build_service_from_config(p2p_config.clone()).await; - - // Node B - p2p_config.bootstrap_nodes = node_a.multiaddrs(); - let mut node_b = build_service_from_config(p2p_config.clone()).await; - - // Node C - p2p_config.bootstrap_nodes = node_b.multiaddrs(); - let mut node_c = build_service_from_config(p2p_config.clone()).await; - - // Node C does not connect to Node A - // it should receive the propagated message from Node B if `GossipsubMessageAcceptance` is `Accept` - node_c - .swarm - .behaviour_mut() - .block_peer(node_a.local_peer_id); - - let mut a_connected_to_b = false; - let mut b_connected_to_c = false; - loop { - // verifies that we've got at least a single peer address to send message to - if a_connected_to_b && 
b_connected_to_c && !message_sent { - message_sent = true; - let broadcast_request = broadcast_request.clone(); - node_a.publish_message(broadcast_request).unwrap(); - } - - tokio::select! { - node_a_event = node_a.next_event() => { - if let Some(FuelP2PEvent::NewSubscription { peer_id, tag }) = &node_a_event { - if tag != &selected_tag { - tracing::info!("Wrong tag, expected: {:?}, actual: {:?}", selected_tag, tag); - } else { - if peer_id == &node_b.local_peer_id { - a_connected_to_b = true; - } - } - } - tracing::info!("Node A Event: {:?}", node_a_event); - }, - node_b_event = node_b.next_event() => { - if let Some(FuelP2PEvent::NewSubscription { peer_id,tag, }) = &node_b_event { - tracing::info!("New subscription for peer_id: {:?} with tag: {:?}", peer_id, tag); - if tag != &selected_tag { - tracing::info!("Wrong tag, expected: {:?}, actual: {:?}", selected_tag, tag); - } else { - if peer_id == &node_c.local_peer_id { - b_connected_to_c = true; - } - } - } - - if let Some(FuelP2PEvent::GossipsubMessage { topic_hash, message, message_id, peer_id }) = node_b_event.clone() { - // Message Validation must be reported - // If it's `Accept`, Node B will propagate the message to Node C - // If it's `Ignore` or `Reject`, Node C should not receive anything - let msg_acceptance = to_message_acceptance(&acceptance); - node_b.report_message_validation_result(&message_id, peer_id, msg_acceptance); - if topic_hash != selected_topic.hash() { - tracing::error!("Wrong topic hash, expected: {} - actual: {}", selected_topic.hash(), topic_hash); - panic!("Wrong Topic"); - } - - check_message_matches_request(&message, &broadcast_request); - - // Node B received the correct message - // If we try to publish it again we will get `PublishError::Duplicate` - // This asserts that our MessageId calculation is consistent irrespective of which Peer sends it - let broadcast_request = broadcast_request.clone(); - matches!(node_b.publish_message(broadcast_request), 
Err(PublishError::Duplicate)); - - match acceptance { - GossipsubMessageAcceptance::Reject | GossipsubMessageAcceptance::Ignore => { - break - }, - _ => { - // the `exit` should happen in Node C - } - } - } - - tracing::info!("Node B Event: {:?}", node_b_event); - } - - node_c_event = node_c.next_event() => { - if let Some(FuelP2PEvent::GossipsubMessage { peer_id, .. }) = node_c_event.clone() { - // Node B should be the source propagator - assert!(peer_id == node_b.local_peer_id); - match acceptance { - GossipsubMessageAcceptance::Reject | GossipsubMessageAcceptance::Ignore => { - panic!("Node C should not receive Rejected or Ignored messages") - }, - GossipsubMessageAcceptance::Accept => { - break - } - } - } - } - }; - } - } - - fn check_message_matches_request( - message: &GossipsubMessage, - expected: &GossipsubBroadcastRequest, - ) { - match (message, expected) { - (GossipsubMessage::NewTx(received), GossipsubBroadcastRequest::NewTx(requested)) => { - assert_eq!(requested.deref(), received, "Both messages were `NewTx`s, but the received message did not match the requested message"); - } - ( - GossipsubMessage::Confirmations(received), - GossipsubBroadcastRequest::Confirmations(requested), - ) => assert_eq!(requested.deref(), received, "Both messages were `Confirmations`, but the received message did not match the requested message"), - _ => panic!("Message does not match the expected request, expected: {:?}, actual: {:?}", expected, message), - } - } - - fn arbitrary_headers_for_range(range: Range) -> Vec { - let mut blocks = Vec::new(); - for i in range { - let mut header: BlockHeader = Default::default(); - header.set_block_height(i.into()); - - let sealed_block = SealedBlockHeader { - entity: header, - consensus: Consensus::PoA(PoAConsensus::new(Default::default())), - }; - blocks.push(sealed_block); - } - blocks - } - - // Metadata gets skipped during serialization, so this is the fuzzy way to compare blocks - fn eq_except_metadata(a: &SealedBlockHeader, 
b: &SealedBlockHeader) -> bool { - let app_eq = match (&a.entity, &b.entity) { - (BlockHeader::V1(a), BlockHeader::V1(b)) => { - a.application() == b.application() - } - #[cfg(feature = "fault-proving")] - (BlockHeader::V2(a), BlockHeader::V2(b)) => { - a.application() == b.application() - } - #[cfg_attr(not(feature = "fault-proving"), allow(unreachable_patterns))] - _ => false, - }; - app_eq && a.entity.consensus() == b.entity.consensus() - } - - async fn request_response_works_with( - request_msg: RequestMessage, - connection_limit: Option, - ) { - let mut p2p_config = Config::default_initialized("request_response_works_with"); - - if let Some(connection_limit) = connection_limit { - p2p_config.max_request_response_peers_connected = connection_limit; - } - - // Node A - let mut node_a = build_service_from_config(p2p_config.clone()).await; - - // Node B - p2p_config.bootstrap_nodes = node_a.multiaddrs(); - let mut node_b = build_service_from_config(p2p_config.clone()).await; - - let (tx_test_end, mut rx_test_end) = mpsc::channel::(1); - - let mut request_sent = false; - - loop { - tokio::select! { - message_sent = rx_test_end.recv() => { - // we received a signal to end the test - assert!(message_sent.unwrap(), "Received incorrect or missing message"); - break; - } - node_a_event = node_a.next_event() => { - if let Some(FuelP2PEvent::PeerInfoUpdated { peer_id, block_height: _ }) = node_a_event { - if node_a.peer_manager.get_peer_info(&peer_id).is_some() { - // 0. 
verifies that we've got at least a single peer address to request message from - if !request_sent { - request_sent = true; - - match request_msg.clone() { - RequestMessage::SealedHeaders(range) => { - let (tx_orchestrator, rx_orchestrator) = oneshot::channel(); - assert!(node_a.send_request_msg(None, request_msg.clone(), ResponseSender::SealedHeaders(tx_orchestrator)).is_ok()); - let tx_test_end = tx_test_end.clone(); - - tokio::spawn(async move { - let response_message = rx_orchestrator.await; - - let expected = arbitrary_headers_for_range(range.clone()); - - if let Ok(response) = response_message { - match response { - Ok((_, Ok(Ok(sealed_headers)))) => { - let check = expected.iter().zip(sealed_headers.iter()).all(|(a, b)| eq_except_metadata(a, b)); - let _ = tx_test_end.send(check).await; - }, - Ok((_, Ok(Err(e)))) => { - tracing::error!("Node A did not return any headers: {:?}", e); - let _ = tx_test_end.send(false).await; - }, - Ok((_, Err(e))) => { - tracing::error!("Error in P2P communication: {:?}", e); - let _ = tx_test_end.send(false).await; - }, - Err(e) => { - tracing::error!("Error in P2P before sending message: {:?}", e); - let _ = tx_test_end.send(false).await; - }, - } - } else { - tracing::error!("Orchestrator failed to receive a message: {:?}", response_message); - let _ = tx_test_end.send(false).await; - } - }); - } - RequestMessage::Transactions(_range) => { - let (tx_orchestrator, rx_orchestrator) = oneshot::channel(); - assert!(node_a.send_request_msg(None, request_msg.clone(), ResponseSender::Transactions(tx_orchestrator)).is_ok()); - let tx_test_end = tx_test_end.clone(); - - tokio::spawn(async move { - let response_message = rx_orchestrator.await; - - if let Ok(response) = response_message { - match response { - Ok((_, Ok(Ok(transactions)))) => { - let check = transactions.len() == 1 && transactions[0].0.len() == 5; - let _ = tx_test_end.send(check).await; - }, - Ok((_, Ok(Err(e)))) => { - tracing::error!("Node A did not return any 
transactions: {:?}", e); - let _ = tx_test_end.send(false).await; - }, - Ok((_, Err(e))) => { - tracing::error!("Error in P2P communication: {:?}", e); - let _ = tx_test_end.send(false).await; - }, - Err(e) => { - tracing::error!("Error in P2P before sending message: {:?}", e); - let _ = tx_test_end.send(false).await; - }, - } - } else { - tracing::error!("Orchestrator failed to receive a message: {:?}", response_message); - let _ = tx_test_end.send(false).await; - } - }); - } - RequestMessage::TxPoolAllTransactionsIds => { - let (tx_orchestrator, rx_orchestrator) = oneshot::channel(); - assert!(node_a.send_request_msg(None, request_msg.clone(), ResponseSender::TxPoolAllTransactionsIds(tx_orchestrator)).is_ok()); - let tx_test_end = tx_test_end.clone(); - tokio::spawn(async move { - let response_message = rx_orchestrator.await; - - if let Ok((_, Ok(Ok(transaction_ids)))) = response_message { - let tx_ids: Vec = (0..5).map(|_| Transaction::default_test_tx().id(&ChainId::new(1))).collect(); - let check = transaction_ids.len() == 5 && transaction_ids.iter().zip(tx_ids.iter()).all(|(a, b)| a == b); - let _ = tx_test_end.send(check).await; - } else { - tracing::error!("Orchestrator failed to receive a message: {:?}", response_message); - let _ = tx_test_end.send(false).await; - } - }); - } - RequestMessage::TxPoolFullTransactions(tx_ids) => { - let (tx_orchestrator, rx_orchestrator) = oneshot::channel(); - assert!(node_a.send_request_msg(None, request_msg.clone(), ResponseSender::TxPoolFullTransactions(tx_orchestrator)).is_ok()); - let tx_test_end = tx_test_end.clone(); - tokio::spawn(async move { - let response_message = rx_orchestrator.await; - - if let Ok((_, Ok(Ok(transactions)))) = response_message { - let txs: Vec> = tx_ids.iter().enumerate().map(|(i, _)| { - if i == 0 { - None - } else { - Some(NetworkableTransactionPool::Transaction(Transaction::default_test_tx())) - } - }).collect(); - let check = transactions.len() == tx_ids.len() && 
transactions.iter().zip(txs.iter()).all(|(a, b)| a == b); - let _ = tx_test_end.send(check).await; - } else { - tracing::error!("Orchestrator failed to receive a message: {:?}", response_message); - let _ = tx_test_end.send(false).await; - } - }); - } - } - } - } - } - - tracing::info!("Node A Event: {:?}", node_a_event); - }, - node_b_event = node_b.next_event() => { - // 2. Node B receives the RequestMessage from Node A initiated by the NetworkOrchestrator - if let Some(FuelP2PEvent::InboundRequestMessage{ request_id, request_message: received_request_message }) = &node_b_event { - match received_request_message { - RequestMessage::SealedHeaders(range) => { - let sealed_headers: Vec<_> = arbitrary_headers_for_range(range.clone()); - - let _ = node_b.send_response_msg(*request_id, V2ResponseMessage::SealedHeaders(Ok(sealed_headers))); - } - RequestMessage::Transactions(_) => { - let txs = (0..5).map(|_| Transaction::default_test_tx()).collect(); - let transactions = vec![Transactions(txs)]; - let _ = node_b.send_response_msg(*request_id, V2ResponseMessage::Transactions(Ok(transactions))); - } - RequestMessage::TxPoolAllTransactionsIds => { - let tx_ids = (0..5).map(|_| Transaction::default_test_tx().id(&ChainId::new(1))).collect(); - let _ = node_b.send_response_msg(*request_id, V2ResponseMessage::TxPoolAllTransactionsIds(Ok(tx_ids))); - } - RequestMessage::TxPoolFullTransactions(tx_ids) => { - let txs = tx_ids.iter().enumerate().map(|(i, _)| { - if i == 0 { - None - } else { - Some(NetworkableTransactionPool::Transaction(Transaction::default_test_tx())) - } - }).collect(); - let _ = node_b.send_response_msg(*request_id, V2ResponseMessage::TxPoolFullTransactions(Ok(txs))); - } - } - } - - tracing::info!("Node B Event: {:?}", node_b_event); - } - }; - } - } - - #[tokio::test] - #[instrument] - async fn request_response_works_with_transactions() { - let arbitrary_range = 2..6; - request_response_works_with(RequestMessage::Transactions(arbitrary_range), None) - 
.await - } - - #[tokio::test] - #[instrument] - async fn request_response_works_with_sealed_headers_range_inclusive() { - let arbitrary_range = 2..6; - request_response_works_with(RequestMessage::SealedHeaders(arbitrary_range), None) - .await - } - - #[tokio::test] - #[instrument] - async fn request_response_works_with_transactions_ids() { - request_response_works_with(RequestMessage::TxPoolAllTransactionsIds, None).await - } - - #[tokio::test] - #[instrument] - async fn request_response_works_with_full_transactions() { - let tx_ids = (0..10) - .map(|_| Transaction::default_test_tx().id(&ChainId::new(1))) - .collect(); - request_response_works_with(RequestMessage::TxPoolFullTransactions(tx_ids), None) - .await - } - - /// We send a request for transactions, but it's responded by only headers - #[tokio::test] - #[instrument] - async fn invalid_response_type_is_detected() { - let mut p2p_config = - Config::default_initialized("invalid_response_type_is_detected"); - - // Node A - let mut node_a = build_service_from_config(p2p_config.clone()).await; - - // Node B - p2p_config.bootstrap_nodes = node_a.multiaddrs(); - let mut node_b = build_service_from_config(p2p_config.clone()).await; - - let (tx_test_end, mut rx_test_end) = mpsc::channel::(1); - - let mut request_sent = false; - - loop { - tokio::select! { - message_sent = rx_test_end.recv() => { - // we received a signal to end the test - assert!(message_sent.unwrap(), "Received incorrect or missing message"); - break; - } - node_a_event = node_a.next_event() => { - if let Some(FuelP2PEvent::PeerInfoUpdated { peer_id, block_height: _ }) = node_a_event { - if node_a.peer_manager.get_peer_info(&peer_id).is_some() { - // 0. 
verifies that we've got at least a single peer address to request message from - if !request_sent { - request_sent = true; - - let (tx_orchestrator, rx_orchestrator) = oneshot::channel(); - assert!(node_a.send_request_msg(None, RequestMessage::Transactions(0..2), ResponseSender::Transactions(tx_orchestrator)).is_ok()); - let tx_test_end = tx_test_end.clone(); - - tokio::spawn(async move { - let response_message = rx_orchestrator.await; - - if let Ok(response) = response_message { - match response { - Ok((_, Ok(_))) => { - let _ = tx_test_end.send(false).await; - panic!("Request succeeded unexpectedly"); - }, - Ok((_, Err(ResponseError::TypeMismatch))) => { - // Got Invalid Response Type as expected, so end test - let _ = tx_test_end.send(true).await; - }, - Ok((_, Err(err))) => { - let _ = tx_test_end.send(false).await; - panic!("Unexpected error in P2P communication: {:?}", err); - }, - Err(e) => { - let _ = tx_test_end.send(false).await; - panic!("Error in P2P before sending message: {:?}", e); - }, - } - } else { - let _ = tx_test_end.send(false).await; - panic!("Orchestrator failed to receive a message: {:?}", response_message); - } - }); - } - } - } - - tracing::info!("Node A Event: {:?}", node_a_event); - }, - node_b_event = node_b.next_event() => { - // 2. 
Node B receives the RequestMessage from Node A initiated by the NetworkOrchestrator - if let Some(FuelP2PEvent::InboundRequestMessage{ request_id, request_message: _ }) = &node_b_event { - let sealed_headers: Vec<_> = arbitrary_headers_for_range(1..3); - let _ = node_b.send_response_msg(*request_id, V2ResponseMessage::SealedHeaders(Ok(sealed_headers))); - } - - tracing::info!("Node B Event: {:?}", node_b_event); - } - }; - } - } - - #[tokio::test] - #[instrument] - async fn req_res_outbound_timeout_works() { - let mut p2p_config = - Config::default_initialized("req_res_outbound_timeout_works"); - - // Node A - // setup request timeout to 1ms in order for the Request to fail - p2p_config.set_request_timeout = Duration::from_millis(1); - - let mut node_a = build_service_from_config(p2p_config.clone()).await; - - // Node B - p2p_config.bootstrap_nodes = node_a.multiaddrs(); - p2p_config.set_request_timeout = Duration::from_secs(20); - let mut node_b = build_service_from_config(p2p_config.clone()).await; - - let (tx_test_end, mut rx_test_end) = tokio::sync::mpsc::channel(1); - - // track the request sent in order to avoid duplicate sending - let mut request_sent = false; - - loop { - tokio::select! { - node_a_event = node_a.next_event() => { - if let Some(FuelP2PEvent::PeerInfoUpdated { peer_id, block_height: _ }) = node_a_event { - if node_a.peer_manager.get_peer_info(&peer_id).is_some() { - // 0. verifies that we've got at least a single peer address to request message from - if !request_sent { - request_sent = true; - - // 1. Simulating Oneshot channel from the NetworkOrchestrator - let (tx_orchestrator, rx_orchestrator) = oneshot::channel(); - - // 2a. 
there should be ZERO pending outbound requests in the table - assert_eq!(node_a.outbound_requests_table.len(), 0); - - // Request successfully sent - let requested_block_height = RequestMessage::SealedHeaders(0..0); - assert!(node_a.send_request_msg(None, requested_block_height, ResponseSender::SealedHeaders(tx_orchestrator)).is_ok()); - - // 2b. there should be ONE pending outbound requests in the table - assert_eq!(node_a.outbound_requests_table.len(), 1); - - let tx_test_end = tx_test_end.clone(); - - tokio::spawn(async move { - // 3. Simulating NetworkOrchestrator receiving a Timeout Error Message! - let response_message = rx_orchestrator.await; - if let Ok(response) = response_message { - match response { - Ok((_, Ok(_))) => { - let _ = tx_test_end.send(false).await; - panic!("Request succeeded unexpectedly"); - }, - Ok((_, Err(ResponseError::P2P(_)))) => { - // Got Invalid Response Type as expected, so end test - let _ = tx_test_end.send(true).await; - }, - Ok((_, Err(err))) => { - let _ = tx_test_end.send(false).await; - panic!("Unexpected error in P2P communication: {:?}", err); - }, - Err(e) => { - let _ = tx_test_end.send(false).await; - panic!("Error in P2P before sending message: {:?}", e); - }, - } - } else { - let _ = tx_test_end.send(false).await; - panic!("Orchestrator failed to receive a message: {:?}", response_message); - } - }); - } - } - } - - tracing::info!("Node A Event: {:?}", node_a_event); - }, - recv = rx_test_end.recv() => { - assert_eq!(recv, Some(true), "Test failed"); - // we received a signal to end the test - // 4. 
there should be ZERO pending outbound requests in the table - // after the Outbound Request Failed with Timeout - assert_eq!(node_a.outbound_requests_table.len(), 0); - break; - }, - // will not receive the request at all - node_b_event = node_b.next_event() => { - tracing::info!("Node B Event: {:?}", node_b_event); - } - }; - } - } - - #[tokio::test] - async fn gossipsub_peer_limit_works() { - tokio::time::timeout( - Duration::from_secs(5), - gossipsub_broadcast( - GossipsubBroadcastRequest::NewTx(Arc::new( - Transaction::default_test_tx(), - )), - GossipsubMessageAcceptance::Accept, - Some(1) // limit to 1 peer, therefore the function will timeout, as it will not be able to propagate the message - ), - ) - .await.expect_err("Should have timed out"); - } - - #[tokio::test] - async fn request_response_peer_limit_works() { - let handle = tokio::spawn(async { - let arbitrary_range = 2..6; - - request_response_works_with( - RequestMessage::Transactions(arbitrary_range), - Some(0), // limit to 0 peers, - ) - .await; - }); - - let result = handle.await; - assert!(result.is_err()); - } -} diff --git a/crates/services/p2p/src/p2p_service/tests.rs b/crates/services/p2p/src/p2p_service/tests.rs new file mode 100644 index 00000000000..af6f10ddd4e --- /dev/null +++ b/crates/services/p2p/src/p2p_service/tests.rs @@ -0,0 +1,1388 @@ +#![allow(non_snake_case)] +#![allow(clippy::cast_possible_truncation)] + +use super::{ + FuelP2PService, + PublishError, +}; +use crate::{ + codecs::postcard::PostcardCodec, + config::Config, + gossipsub::{ + messages::{ + GossipTopicTag, + GossipsubBroadcastRequest, + GossipsubMessage, + }, + topics::{ + NEW_TX_GOSSIP_TOPIC, + TX_CONFIRMATIONS_GOSSIP_TOPIC, + }, + }, + p2p_service::FuelP2PEvent, + peer_manager::PeerInfo, + request_response::messages::{ + RequestMessage, + ResponseError, + ResponseSender, + V2ResponseMessage, + }, + service::to_message_acceptance, +}; +use fuel_core_types::{ + blockchain::{ + consensus::{ + poa::PoAConsensus, + 
Consensus, + }, + header::BlockHeader, + SealedBlockHeader, + }, + fuel_tx::{ + Transaction, + TransactionBuilder, + TxId, + UniqueIdentifier, + }, + fuel_types::ChainId, + services::p2p::{ + GossipsubMessageAcceptance, + NetworkableTransactionPool, + Transactions, + TxConfirmations, + }, +}; +use futures::{ + future::join_all, + StreamExt, +}; +use libp2p::{ + gossipsub::{ + Sha256Topic, + Topic, + }, + identity::Keypair, + swarm::{ + ListenError, + SwarmEvent, + }, + Multiaddr, + PeerId, +}; +use rand::Rng; +use std::{ + collections::HashSet, + ops::{ + Deref, + Range, + }, + sync::Arc, + time::Duration, +}; +use tokio::sync::{ + broadcast, + mpsc, + oneshot, + watch, +}; +use tracing_attributes::instrument; + +type P2PService = FuelP2PService; + +/// helper function for building FuelP2PService +async fn build_service_from_config(mut p2p_config: Config) -> P2PService { + p2p_config.keypair = Keypair::generate_secp256k1(); // change keypair for each Node + let max_block_size = p2p_config.max_block_size; + let (sender, _) = + broadcast::channel(p2p_config.reserved_nodes.len().saturating_add(1)); + + let mut service = + FuelP2PService::new(sender, p2p_config, PostcardCodec::new(max_block_size)) + .await + .unwrap(); + service.start().await.unwrap(); + service +} + +async fn setup_bootstrap_nodes( + p2p_config: &Config, + bootstrap_nodes_count: usize, +) -> (Vec, Vec) { + let nodes = join_all( + (0..bootstrap_nodes_count).map(|_| build_service_from_config(p2p_config.clone())), + ) + .await; + let bootstrap_multiaddrs = nodes + .iter() + .flat_map(|b| b.multiaddrs()) + .collect::>(); + (nodes, bootstrap_multiaddrs) +} + +fn spawn(stop: &watch::Sender<()>, mut node: P2PService) { + let mut stop = stop.subscribe(); + tokio::spawn(async move { + loop { + tokio::select! 
{ + _ = node.next_event() => {} + _ = stop.changed() => { + break; + } + } + } + }); +} + +#[tokio::test] +#[instrument] +async fn p2p_service_works() { + build_service_from_config(Config::default_initialized("p2p_service_works")).await; +} + +// Single sentry node connects to multiple reserved nodes and `max_peers_allowed` amount of non-reserved nodes. +// It also tries to dial extra non-reserved nodes to establish the connection. +// A single reserved node is not started immediately with the rest of the nodes. +// Once sentry node establishes the connection with the allowed number of nodes +// we start the reserved node, and await for it to establish the connection. +// This test proves that there is always an available slot for the reserved node to connect to. +#[tokio::test(flavor = "multi_thread")] +#[instrument] +async fn reserved_nodes_reconnect_works() { + let p2p_config = Config::default_initialized("reserved_nodes_reconnect_works"); + + // total amount will be `max_peers_allowed` + `reserved_nodes.len()` + let max_peers_allowed: usize = 3; + + let (bootstrap_nodes, bootstrap_multiaddrs) = + setup_bootstrap_nodes(&p2p_config, max_peers_allowed.saturating_mul(5)).await; + let (mut reserved_nodes, reserved_multiaddrs) = + setup_bootstrap_nodes(&p2p_config, max_peers_allowed).await; + + let mut sentry_node = { + let mut p2p_config = p2p_config.clone(); + p2p_config.max_discovery_peers_connected = max_peers_allowed as u32; + + p2p_config.bootstrap_nodes = bootstrap_multiaddrs; + + p2p_config.reserved_nodes = reserved_multiaddrs; + + build_service_from_config(p2p_config).await + }; + + // pop() a single reserved node, so it's not run with the rest of the nodes + let mut reserved_node = reserved_nodes.pop(); + let reserved_node_peer_id = reserved_node.as_ref().unwrap().local_peer_id; + + let all_node_services: Vec<_> = bootstrap_nodes + .into_iter() + .chain(reserved_nodes.into_iter()) + .collect(); + + let mut all_nodes_ids: Vec = all_node_services + .iter() + 
.map(|service| service.local_peer_id) + .collect(); + + let (stop_sender, _) = watch::channel(()); + all_node_services.into_iter().for_each(|node| { + spawn(&stop_sender, node); + }); + + loop { + tokio::select! { + sentry_node_event = sentry_node.next_event() => { + // we've connected to all other peers + if sentry_node.peer_manager.total_peers_connected() > max_peers_allowed { + // if the `reserved_node` is not included, + // create and insert it, to be polled with rest of the nodes + if !all_nodes_ids + .iter() + .any(|local_peer_id| local_peer_id == &reserved_node_peer_id) { + if let Some(node) = reserved_node { + all_nodes_ids.push(node.local_peer_id); + spawn(&stop_sender, node); + reserved_node = None; + } + } + } + if let Some(FuelP2PEvent::PeerConnected(peer_id)) = sentry_node_event { + // we connected to the desired reserved node + if peer_id == reserved_node_peer_id { + break + } + } + }, + } + } + stop_sender.send(()).unwrap(); +} + +#[tokio::test] +#[instrument] +async fn dont_connect_to_node_with_same_peer_id() { + let mut p2p_config = + Config::default_initialized("dont_connect_to_node_with_same_peer_id"); + let mut node_a = build_service_from_config(p2p_config.clone()).await; + // We don't use build_service_from_config here, because we want to use the same keypair + // to have the same PeerId + let node_b = { + // Given + p2p_config.reserved_nodes = node_a.multiaddrs(); + let max_block_size = p2p_config.max_block_size; + let (sender, _) = + broadcast::channel(p2p_config.reserved_nodes.len().saturating_add(1)); + + let mut service = + FuelP2PService::new(sender, p2p_config, PostcardCodec::new(max_block_size)) + .await + .unwrap(); + service.start().await.unwrap(); + service + }; + // When + tokio::time::timeout(Duration::from_secs(5), async move { + loop { + let event = node_a.next_event().await; + if let Some(FuelP2PEvent::PeerConnected(_)) = event { + panic!("Node B should not connect to Node A because they have the same PeerId"); + } + 
assert_eq!(node_a.peer_manager().total_peers_connected(), 0); + } + }) + .await + // Then + .expect_err("The node should not connect to itself"); + assert_eq!(node_b.peer_manager().total_peers_connected(), 0); +} + +// We start with two nodes, node_a and node_b, bootstrapped with `bootstrap_nodes_count` other nodes. +// Yet node_a and node_b are only allowed to connect to specified amount of nodes. +#[tokio::test] +#[instrument] +async fn max_peers_connected_works() { + let p2p_config = Config::default_initialized("max_peers_connected_works"); + + let bootstrap_nodes_count = 20; + let node_a_max_peers_allowed: usize = 3; + let node_b_max_peers_allowed: usize = 5; + + let (mut nodes, nodes_multiaddrs) = + setup_bootstrap_nodes(&p2p_config, bootstrap_nodes_count).await; + + // this node is allowed to only connect to `node_a_max_peers_allowed` other nodes + let mut node_a = { + let mut p2p_config = p2p_config.clone(); + p2p_config.max_discovery_peers_connected = node_a_max_peers_allowed as u32; + // it still tries to dial all nodes! + p2p_config.bootstrap_nodes.clone_from(&nodes_multiaddrs); + + build_service_from_config(p2p_config).await + }; + + // this node is allowed to only connect to `node_b_max_peers_allowed` other nodes + let mut node_b = { + let mut p2p_config = p2p_config.clone(); + p2p_config.max_discovery_peers_connected = node_b_max_peers_allowed as u32; + // it still tries to dial all nodes! + p2p_config.bootstrap_nodes.clone_from(&nodes_multiaddrs); + + build_service_from_config(p2p_config).await + }; + + let (tx, mut rx) = tokio::sync::oneshot::channel::<()>(); + let jh = tokio::spawn(async move { + while rx.try_recv().is_err() { + futures::stream::iter(nodes.iter_mut()) + .for_each_concurrent(4, |node| async move { + node.next_event().await; + }) + .await; + } + }); + + let mut node_a_hit_limit = false; + let mut node_b_hit_limit = false; + let mut instance = tokio::time::Instant::now(); + + // After we hit limit for node_a and node_b start timer. 
+ // If we don't exceed the limit during 5 seconds, finish the test successfully. + while instance.elapsed().as_secs() < 5 { + tokio::select! { + event_from_node_a = node_a.next_event() => { + if let Some(FuelP2PEvent::PeerConnected(_)) = event_from_node_a { + if node_a.peer_manager().total_peers_connected() > node_a_max_peers_allowed { + panic!("The node should only connect to max {node_a_max_peers_allowed} peers"); + } + node_a_hit_limit |= node_a.peer_manager().total_peers_connected() == node_a_max_peers_allowed; + } + tracing::info!("Event from the node_a: {:?}", event_from_node_a); + }, + event_from_node_b = node_b.next_event() => { + if let Some(FuelP2PEvent::PeerConnected(_)) = event_from_node_b { + if node_b.peer_manager().total_peers_connected() > node_b_max_peers_allowed { + panic!("The node should only connect to max {node_b_max_peers_allowed} peers"); + } + node_b_hit_limit |= node_b.peer_manager().total_peers_connected() == node_b_max_peers_allowed; + } + tracing::info!("Event from the node_b: {:?}", event_from_node_b); + }, + } + + if !(node_a_hit_limit && node_b_hit_limit) { + instance = tokio::time::Instant::now(); + } + } + + tx.send(()).unwrap(); + jh.await.unwrap() +} + +// Simulate 2 Sets of Sentry nodes. +// In both Sets, a single Guarded Node should only be connected to their sentry nodes. +// While other nodes can and should connect to nodes outside of the Sentry Set. 
+#[tokio::test(flavor = "multi_thread")]
+#[instrument]
+async fn sentry_nodes_working() {
+ const RESERVED_NODE_SIZE: usize = 4;
+
+ let mut p2p_config = Config::default_initialized("sentry_nodes_working");
+
+ async fn build_sentry_nodes(p2p_config: Config) -> (P2PService, Vec) {
+ let (reserved_nodes, reserved_multiaddrs) =
+ setup_bootstrap_nodes(&p2p_config, RESERVED_NODE_SIZE).await;
+
+ // set up the guarded node service with `reserved_nodes_only_mode`
+ let guarded_node_service = {
+ let mut p2p_config = p2p_config.clone();
+ p2p_config.reserved_nodes = reserved_multiaddrs;
+ p2p_config.reserved_nodes_only_mode = true;
+ build_service_from_config(p2p_config).await
+ };
+
+ let sentry_nodes = reserved_nodes;
+
+ (guarded_node_service, sentry_nodes)
+ }
+
+ let (mut first_guarded_node, mut first_sentry_nodes) =
+ build_sentry_nodes(p2p_config.clone()).await;
+ p2p_config.bootstrap_nodes = first_sentry_nodes
+ .iter()
+ .flat_map(|n| n.multiaddrs())
+ .collect();
+
+ let (mut second_guarded_node, second_sentry_nodes) =
+ build_sentry_nodes(p2p_config).await;
+
+ let first_sentry_set: HashSet<_> = first_sentry_nodes
+ .iter()
+ .map(|node| node.local_peer_id)
+ .collect();
+
+ let second_sentry_set: HashSet<_> = second_sentry_nodes
+ .iter()
+ .map(|node| node.local_peer_id)
+ .collect();
+
+ let mut single_sentry_node = first_sentry_nodes.pop().unwrap();
+ let mut sentry_node_connections = HashSet::new();
+ let (stop_sender, _) = watch::channel(());
+ first_sentry_nodes
+ .into_iter()
+ .chain(second_sentry_nodes.into_iter())
+ .for_each(|node| {
+ spawn(&stop_sender, node);
+ });
+
+ let mut instance = tokio::time::Instant::now();
+ // After guards are connected to all sentries and at least one sentry has
+ // more connections than sentries in the group, start the timer.
+ // If guards don't connect to new nodes during 5 seconds, finish the test successfully.
+ while instance.elapsed().as_secs() < 5 {
+ tokio::select! 
{ + event_from_first_guarded = first_guarded_node.next_event() => { + if let Some(FuelP2PEvent::PeerConnected(peer_id)) = event_from_first_guarded { + if !first_sentry_set.contains(&peer_id) { + panic!("The node should only connect to the specified reserved nodes!"); + } + } + tracing::info!("Event from the first guarded node: {:?}", event_from_first_guarded); + }, + event_from_second_guarded = second_guarded_node.next_event() => { + if let Some(FuelP2PEvent::PeerConnected(peer_id)) = event_from_second_guarded { + if !second_sentry_set.contains(&peer_id) { + panic!("The node should only connect to the specified reserved nodes!"); + } + } + tracing::info!("Event from the second guarded node: {:?}", event_from_second_guarded); + }, + // Poll one of the reserved, sentry nodes + sentry_node_event = single_sentry_node.next_event() => { + if let Some(FuelP2PEvent::PeerConnected(peer_id)) = sentry_node_event { + sentry_node_connections.insert(peer_id); + } + } + }; + + // This reserved node has connected to more than the number of reserved nodes it is part of. + // It means it has discovered other nodes in the network. + if sentry_node_connections.len() < 2 * RESERVED_NODE_SIZE { + instance = tokio::time::Instant::now(); + } + } + stop_sender.send(()).unwrap(); +} + +// Simulates 2 p2p nodes that are on the same network and should connect via mDNS +// without any additional bootstrapping +#[tokio::test] +#[instrument] +async fn nodes_connected_via_mdns() { + // Node A + let mut p2p_config = Config::default_initialized("nodes_connected_via_mdns"); + p2p_config.enable_mdns = true; + let mut node_a = build_service_from_config(p2p_config.clone()).await; + + // Node B + let mut node_b = build_service_from_config(p2p_config).await; + + loop { + tokio::select! 
{ + node_b_event = node_b.next_event() => { + if let Some(FuelP2PEvent::PeerConnected(_)) = node_b_event { + // successfully connected to Node A + break + } + tracing::info!("Node B Event: {:?}", node_b_event); + }, + _ = node_a.swarm.select_next_some() => {}, + }; + } +} + +// Simulates 2 p2p nodes that are on the same network but their Fuel Upgrade checksum is different +// (different chain id or chain config) +// So they are not able to connect +#[tokio::test] +#[instrument] +async fn nodes_cannot_connect_due_to_different_checksum() { + use libp2p::TransportError; + // Node A + let mut p2p_config = + Config::default_initialized("nodes_cannot_connect_due_to_different_checksum"); + let mut node_a = build_service_from_config(p2p_config.clone()).await; + + // different checksum + p2p_config.checksum = [1u8; 32].into(); + p2p_config.bootstrap_nodes = node_a.multiaddrs(); + // Node B + let mut node_b = build_service_from_config(p2p_config).await; + + loop { + tokio::select! { + node_a_event = node_a.swarm.select_next_some() => { + tracing::info!("Node A Event: {:?}", node_a_event); + if let SwarmEvent::IncomingConnectionError { error: ListenError::Transport(TransportError::Other(_)), .. 
} = node_a_event { + break + } + }, + node_b_event = node_b.next_event() => { + if let Some(FuelP2PEvent::PeerConnected(_)) = node_b_event { + panic!("Node B should not connect to Node A!") + } + tracing::info!("Node B Event: {:?}", node_b_event); + }, + + }; + } +} + +// Simulates 3 p2p nodes, Node B & Node C are bootstrapped with Node A +// Using Identify Protocol Node C should be able to identify and connect to Node B +#[tokio::test] +#[instrument] +async fn nodes_connected_via_identify() { + // Node A + let mut p2p_config = Config::default_initialized("nodes_connected_via_identify"); + + let mut node_a = build_service_from_config(p2p_config.clone()).await; + + // Node B + p2p_config.bootstrap_nodes = node_a.multiaddrs(); + let mut node_b = build_service_from_config(p2p_config.clone()).await; + + // Node C + let mut node_c = build_service_from_config(p2p_config).await; + + loop { + tokio::select! { + node_a_event = node_a.next_event() => { + tracing::info!("Node A Event: {:?}", node_a_event); + }, + node_b_event = node_b.next_event() => { + tracing::info!("Node B Event: {:?}", node_b_event); + }, + + node_c_event = node_c.next_event() => { + if let Some(FuelP2PEvent::PeerConnected(peer_id)) = node_c_event { + // we have connected to Node B! 
+ if peer_id == node_b.local_peer_id { + break + } + } + + tracing::info!("Node C Event: {:?}", node_c_event); + } + }; + } +} + +// Simulates 2 p2p nodes that connect to each other and consequently exchange Peer Info +// On successful connection, node B updates its latest BlockHeight +// and shares it with Peer A via Heartbeat protocol +#[tokio::test] +#[instrument] +async fn peer_info_updates_work() { + let mut p2p_config = Config::default_initialized("peer_info_updates_work"); + + // Node A + let mut node_a = build_service_from_config(p2p_config.clone()).await; + + // Node B + p2p_config.bootstrap_nodes = node_a.multiaddrs(); + let mut node_b = build_service_from_config(p2p_config).await; + + let latest_block_height = 40_u32.into(); + + loop { + tokio::select! { + node_a_event = node_a.next_event() => { + if let Some(FuelP2PEvent::PeerInfoUpdated { peer_id, block_height: _ }) = node_a_event { + if let Some(PeerInfo { heartbeat_data, client_version, .. }) = node_a.peer_manager.get_peer_info(&peer_id) { + // Exits after it verifies that: + // 1. Peer Addresses are known + // 2. Client Version is known + // 3. 
Node has responded with their latest BlockHeight + if client_version.is_some() && heartbeat_data.block_height == Some(latest_block_height) { + break; + } + } + } + + tracing::info!("Node A Event: {:?}", node_a_event); + }, + node_b_event = node_b.next_event() => { + if let Some(FuelP2PEvent::PeerConnected(_)) = node_b_event { + // we've connected to Peer A + // let's update our BlockHeight + node_b.update_block_height(latest_block_height); + } + + tracing::info!("Node B Event: {:?}", node_b_event); + } + } + } +} + +#[tokio::test] +#[instrument] +async fn gossipsub_broadcast_tx_with_accept__new_tx() { + tracing_subscriber::fmt() + .with_max_level(tracing::Level::INFO) + .with_target(false) + .init(); + for _ in 0..100 { + tokio::time::timeout( + Duration::from_secs(5), + gossipsub_broadcast( + GossipsubBroadcastRequest::NewTx( + Arc::new(Transaction::default_test_tx()), + ), + GossipsubMessageAcceptance::Accept, + None, + ), + ) + .await + .unwrap(); + } +} + +#[tokio::test] +#[instrument] +async fn gossipsub_broadcast_tx_with_accept__tx_confirmations() { + tracing_subscriber::fmt() + .with_max_level(tracing::Level::INFO) + .with_target(false) + .init(); + for _ in 0..100 { + tokio::time::timeout( + Duration::from_secs(20), + gossipsub_broadcast( + GossipsubBroadcastRequest::Confirmations(Arc::new( + TxConfirmations::default_test_tx(), + )), + GossipsubMessageAcceptance::Accept, + None, + ), + ) + .await + .unwrap(); + } +} + +#[tokio::test] +#[instrument] +async fn gossipsub_broadcast_tx_with_reject__new_tx() { + for _ in 0..100 { + tokio::time::timeout( + Duration::from_secs(5), + gossipsub_broadcast( + GossipsubBroadcastRequest::NewTx( + Arc::new(Transaction::default_test_tx()), + ), + GossipsubMessageAcceptance::Reject, + None, + ), + ) + .await + .unwrap(); + } +} + +#[tokio::test] +#[instrument] +async fn gossipsub_broadcast_tx_with_reject__tx_confirmations() { + for _ in 0..100 { + tokio::time::timeout( + Duration::from_secs(5), + gossipsub_broadcast( + 
GossipsubBroadcastRequest::Confirmations(Arc::new( + TxConfirmations::default_test_tx(), + )), + GossipsubMessageAcceptance::Reject, + None, + ), + ) + .await + .unwrap(); + } +} + +#[tokio::test] +#[instrument] +#[ignore] +async fn gossipsub_scoring_with_accepted_messages() { + gossipsub_scoring_tester( + "gossipsub_scoring_with_accepted_messages", + 100, + GossipsubMessageAcceptance::Accept, + ) + .await; +} + +/// At `GRAYLIST_THRESHOLD` the node will ignore all messages from the peer +/// And our PeerManager will ban the peer at that point - leading to disconnect +#[tokio::test] +#[instrument] +#[ignore] +async fn gossipsub_scoring_with_rejected_messages() { + gossipsub_scoring_tester( + "gossipsub_scoring_with_rejected_messages", + 100, + GossipsubMessageAcceptance::Reject, + ) + .await; +} + +// TODO: Move me before tests that use this function +/// Helper function for testing gossipsub scoring +/// ! Dev Note: this function runs forever, its purpose is to show the scoring in action with passage of time +async fn gossipsub_scoring_tester( + test_name: &str, + amount_of_msgs_per_second: usize, + acceptance: GossipsubMessageAcceptance, +) { + let mut p2p_config = Config::default_initialized(test_name); + + // Node A + let mut node_a = build_service_from_config(p2p_config.clone()).await; + + // Node B + p2p_config.bootstrap_nodes = node_a.multiaddrs(); + let mut node_b = build_service_from_config(p2p_config.clone()).await; + + // Node C + p2p_config.bootstrap_nodes = node_b.multiaddrs(); + let mut node_c = build_service_from_config(p2p_config.clone()).await; + + let mut interval = tokio::time::interval(Duration::from_secs(1)); + + loop { + tokio::select! { + node_a_event = node_a.next_event() => { + if let Some(FuelP2PEvent::GossipsubMessage { message_id, peer_id, .. 
}) = node_a_event { + let msg_acceptance = to_message_acceptance(&acceptance); + node_a.report_message_validation_result(&message_id, peer_id, msg_acceptance); + } + } + node_b_event = node_b.next_event() => { + if let Some(FuelP2PEvent::GossipsubMessage { message_id, peer_id, .. }) = node_b_event { + let msg_acceptance = to_message_acceptance(&acceptance); + node_b.report_message_validation_result(&message_id, peer_id, msg_acceptance); + } + }, + node_c_event = node_c.next_event() => { + if let Some(FuelP2PEvent::GossipsubMessage { message_id, peer_id, .. }) = node_c_event { + let msg_acceptance = to_message_acceptance(&acceptance); + node_c.report_message_validation_result(&message_id, peer_id, msg_acceptance); + } + }, + _ = interval.tick() => { + let mut transactions = vec![]; + for _ in 0..amount_of_msgs_per_second { + let random_tx = + TransactionBuilder::script(rand::thread_rng().gen::<[u8; 32]>().to_vec(), rand::thread_rng().gen::<[u8; 32]>().to_vec()).finalize_as_transaction(); + + transactions.push(random_tx.clone()); + let random_tx = GossipsubBroadcastRequest::NewTx(Arc::new(random_tx)); + + match rand::thread_rng().gen_range(1..=3) { + 1 => { + // Node A sends a Transaction + let _ = node_a.publish_message(random_tx); + + }, + 2 => { + // Node B sends a Transaction + let _ = node_b.publish_message(random_tx); + + }, + 3 => { + // Node C sends a Transaction + let _ = node_c.publish_message(random_tx); + }, + _ => unreachable!("Random number generator is broken") + } + } + + eprintln!("Node A WORLD VIEW"); + eprintln!("B score: {:?}", node_a.get_peer_score(&node_b.local_peer_id).unwrap()); + eprintln!("C score: {:?}", node_a.get_peer_score(&node_c.local_peer_id).unwrap()); + eprintln!(); + + eprintln!("Node B WORLD VIEW"); + eprintln!("A score: {:?}", node_b.get_peer_score(&node_a.local_peer_id).unwrap()); + eprintln!("C score: {:?}", node_b.get_peer_score(&node_c.local_peer_id).unwrap()); + eprintln!(); + + eprintln!("Node C WORLD VIEW"); + eprintln!("A 
score: {:?}", node_c.get_peer_score(&node_a.local_peer_id).unwrap()); + eprintln!("B score: {:?}", node_c.get_peer_score(&node_b.local_peer_id).unwrap()); + eprintln!(); + + // never ending loop + // break; + } + } + } +} + +// TODO: Move me before tests that use this function +/// Reusable helper function for Broadcasting Gossipsub requests +async fn gossipsub_broadcast( + broadcast_request: GossipsubBroadcastRequest, + acceptance: GossipsubMessageAcceptance, + connection_limit: Option, +) { + let mut p2p_config = Config::default_initialized("gossipsub_exchanges_messages"); + + if let Some(connection_limit) = connection_limit { + p2p_config.max_gossipsub_peers_connected = connection_limit; + } + + let (selected_topic, selected_tag): (Sha256Topic, GossipTopicTag) = { + let (topic, tag) = match broadcast_request { + GossipsubBroadcastRequest::NewTx(_) => { + (NEW_TX_GOSSIP_TOPIC, GossipTopicTag::NewTx) + } + GossipsubBroadcastRequest::Confirmations(_) => ( + TX_CONFIRMATIONS_GOSSIP_TOPIC, + GossipTopicTag::TxConfirmations, + ), + }; + + ( + Topic::new(format!("{}/{}", topic, p2p_config.network_name)), + tag, + ) + }; + tracing::info!("Selected Topic: {:?}", selected_topic); + + let mut message_sent = false; + + // Node A + let mut node_a = build_service_from_config(p2p_config.clone()).await; + + // Node B + p2p_config.bootstrap_nodes = node_a.multiaddrs(); + let mut node_b = build_service_from_config(p2p_config.clone()).await; + + // Node C + p2p_config.bootstrap_nodes = node_b.multiaddrs(); + let mut node_c = build_service_from_config(p2p_config.clone()).await; + + // Node C does not connect to Node A + // it should receive the propagated message from Node B if `GossipsubMessageAcceptance` is `Accept` + node_c + .swarm + .behaviour_mut() + .block_peer(node_a.local_peer_id); + + let mut a_connected_to_b = false; + let mut b_connected_to_c = false; + loop { + // verifies that we've got at least a single peer address to send message to + if a_connected_to_b && 
b_connected_to_c && !message_sent { + message_sent = true; + let broadcast_request = broadcast_request.clone(); + node_a.publish_message(broadcast_request).unwrap(); + } + + tokio::select! { + node_a_event = node_a.next_event() => { + if let Some(FuelP2PEvent::NewSubscription { peer_id, tag }) = &node_a_event { + if tag != &selected_tag { + tracing::info!("Wrong tag, expected: {:?}, actual: {:?}", selected_tag, tag); + } else if peer_id == &node_b.local_peer_id { + a_connected_to_b = true; + } + } + tracing::info!("Node A Event: {:?}", node_a_event); + }, + node_b_event = node_b.next_event() => { + if let Some(FuelP2PEvent::NewSubscription { peer_id,tag, }) = &node_b_event { + tracing::info!("New subscription for peer_id: {:?} with tag: {:?}", peer_id, tag); + if tag != &selected_tag { + tracing::info!("Wrong tag, expected: {:?}, actual: {:?}", selected_tag, tag); + } else if peer_id == &node_c.local_peer_id { + b_connected_to_c = true; + } + } + + if let Some(FuelP2PEvent::GossipsubMessage { topic_hash, message, message_id, peer_id }) = node_b_event.clone() { + // Message Validation must be reported + // If it's `Accept`, Node B will propagate the message to Node C + // If it's `Ignore` or `Reject`, Node C should not receive anything + let msg_acceptance = to_message_acceptance(&acceptance); + node_b.report_message_validation_result(&message_id, peer_id, msg_acceptance); + if topic_hash != selected_topic.hash() { + tracing::error!("Wrong topic hash, expected: {} - actual: {}", selected_topic.hash(), topic_hash); + panic!("Wrong Topic"); + } + + check_message_matches_request(&message, &broadcast_request); + + // Node B received the correct message + // If we try to publish it again we will get `PublishError::Duplicate` + // This asserts that our MessageId calculation is consistent irrespective of which Peer sends it + let broadcast_request = broadcast_request.clone(); + matches!(node_b.publish_message(broadcast_request), Err(PublishError::Duplicate)); + + match 
acceptance { + GossipsubMessageAcceptance::Reject | GossipsubMessageAcceptance::Ignore => { + break + }, + _ => { + // the `exit` should happen in Node C + } + } + } + + tracing::info!("Node B Event: {:?}", node_b_event); + } + + node_c_event = node_c.next_event() => { + if let Some(FuelP2PEvent::GossipsubMessage { peer_id, .. }) = node_c_event.clone() { + // Node B should be the source propagator + assert!(peer_id == node_b.local_peer_id); + match acceptance { + GossipsubMessageAcceptance::Reject | GossipsubMessageAcceptance::Ignore => { + panic!("Node C should not receive Rejected or Ignored messages") + }, + GossipsubMessageAcceptance::Accept => { + break + } + } + } + } + }; + } +} + +fn check_message_matches_request( + message: &GossipsubMessage, + expected: &GossipsubBroadcastRequest, +) { + match (message, expected) { + (GossipsubMessage::NewTx(received), GossipsubBroadcastRequest::NewTx(requested)) => { + assert_eq!(requested.deref(), received, "Both messages were `NewTx`s, but the received message did not match the requested message"); + } + ( + GossipsubMessage::Confirmations(received), + GossipsubBroadcastRequest::Confirmations(requested), + ) => assert_eq!(requested.deref(), received, "Both messages were `Confirmations`, but the received message did not match the requested message"), + _ => panic!("Message does not match the expected request, expected: {:?}, actual: {:?}", expected, message), + } +} + +fn arbitrary_headers_for_range(range: Range) -> Vec { + let mut blocks = Vec::new(); + for i in range { + let mut header: BlockHeader = Default::default(); + header.set_block_height(i.into()); + + let sealed_block = SealedBlockHeader { + entity: header, + consensus: Consensus::PoA(PoAConsensus::new(Default::default())), + }; + blocks.push(sealed_block); + } + blocks +} + +// Metadata gets skipped during serialization, so this is the fuzzy way to compare blocks +fn eq_except_metadata(a: &SealedBlockHeader, b: &SealedBlockHeader) -> bool { + let app_eq = 
match (&a.entity, &b.entity) { + (BlockHeader::V1(a), BlockHeader::V1(b)) => a.application() == b.application(), + #[cfg(feature = "fault-proving")] + (BlockHeader::V2(a), BlockHeader::V2(b)) => a.application() == b.application(), + #[cfg_attr(not(feature = "fault-proving"), allow(unreachable_patterns))] + _ => false, + }; + app_eq && a.entity.consensus() == b.entity.consensus() +} + +async fn request_response_works_with( + request_msg: RequestMessage, + connection_limit: Option, +) { + let mut p2p_config = Config::default_initialized("request_response_works_with"); + + if let Some(connection_limit) = connection_limit { + p2p_config.max_request_response_peers_connected = connection_limit; + } + + // Node A + let mut node_a = build_service_from_config(p2p_config.clone()).await; + + // Node B + p2p_config.bootstrap_nodes = node_a.multiaddrs(); + let mut node_b = build_service_from_config(p2p_config.clone()).await; + + let (tx_test_end, mut rx_test_end) = mpsc::channel::(1); + + let mut request_sent = false; + + loop { + tokio::select! { + message_sent = rx_test_end.recv() => { + // we received a signal to end the test + assert!(message_sent.unwrap(), "Received incorrect or missing message"); + break; + } + node_a_event = node_a.next_event() => { + if let Some(FuelP2PEvent::PeerInfoUpdated { peer_id, block_height: _ }) = node_a_event { + if node_a.peer_manager.get_peer_info(&peer_id).is_some() { + // 0. 
verifies that we've got at least a single peer address to request message from + if !request_sent { + request_sent = true; + + match request_msg.clone() { + RequestMessage::SealedHeaders(range) => { + let (tx_orchestrator, rx_orchestrator) = oneshot::channel(); + assert!(node_a.send_request_msg(None, request_msg.clone(), ResponseSender::SealedHeaders(tx_orchestrator)).is_ok()); + let tx_test_end = tx_test_end.clone(); + + tokio::spawn(async move { + let response_message = rx_orchestrator.await; + + let expected = arbitrary_headers_for_range(range.clone()); + + if let Ok(response) = response_message { + match response { + Ok((_, Ok(Ok(sealed_headers)))) => { + let check = expected.iter().zip(sealed_headers.iter()).all(|(a, b)| eq_except_metadata(a, b)); + let _ = tx_test_end.send(check).await; + }, + Ok((_, Ok(Err(e)))) => { + tracing::error!("Node A did not return any headers: {:?}", e); + let _ = tx_test_end.send(false).await; + }, + Ok((_, Err(e))) => { + tracing::error!("Error in P2P communication: {:?}", e); + let _ = tx_test_end.send(false).await; + }, + Err(e) => { + tracing::error!("Error in P2P before sending message: {:?}", e); + let _ = tx_test_end.send(false).await; + }, + } + } else { + tracing::error!("Orchestrator failed to receive a message: {:?}", response_message); + let _ = tx_test_end.send(false).await; + } + }); + } + RequestMessage::Transactions(_range) => { + let (tx_orchestrator, rx_orchestrator) = oneshot::channel(); + assert!(node_a.send_request_msg(None, request_msg.clone(), ResponseSender::Transactions(tx_orchestrator)).is_ok()); + let tx_test_end = tx_test_end.clone(); + + tokio::spawn(async move { + let response_message = rx_orchestrator.await; + + if let Ok(response) = response_message { + match response { + Ok((_, Ok(Ok(transactions)))) => { + let check = transactions.len() == 1 && transactions[0].0.len() == 5; + let _ = tx_test_end.send(check).await; + }, + Ok((_, Ok(Err(e)))) => { + tracing::error!("Node A did not return any 
transactions: {:?}", e); + let _ = tx_test_end.send(false).await; + }, + Ok((_, Err(e))) => { + tracing::error!("Error in P2P communication: {:?}", e); + let _ = tx_test_end.send(false).await; + }, + Err(e) => { + tracing::error!("Error in P2P before sending message: {:?}", e); + let _ = tx_test_end.send(false).await; + }, + } + } else { + tracing::error!("Orchestrator failed to receive a message: {:?}", response_message); + let _ = tx_test_end.send(false).await; + } + }); + } + RequestMessage::TxPoolAllTransactionsIds => { + let (tx_orchestrator, rx_orchestrator) = oneshot::channel(); + assert!(node_a.send_request_msg(None, request_msg.clone(), ResponseSender::TxPoolAllTransactionsIds(tx_orchestrator)).is_ok()); + let tx_test_end = tx_test_end.clone(); + tokio::spawn(async move { + let response_message = rx_orchestrator.await; + + if let Ok((_, Ok(Ok(transaction_ids)))) = response_message { + let tx_ids: Vec = (0..5).map(|_| Transaction::default_test_tx().id(&ChainId::new(1))).collect(); + let check = transaction_ids.len() == 5 && transaction_ids.iter().zip(tx_ids.iter()).all(|(a, b)| a == b); + let _ = tx_test_end.send(check).await; + } else { + tracing::error!("Orchestrator failed to receive a message: {:?}", response_message); + let _ = tx_test_end.send(false).await; + } + }); + } + RequestMessage::TxPoolFullTransactions(tx_ids) => { + let (tx_orchestrator, rx_orchestrator) = oneshot::channel(); + assert!(node_a.send_request_msg(None, request_msg.clone(), ResponseSender::TxPoolFullTransactions(tx_orchestrator)).is_ok()); + let tx_test_end = tx_test_end.clone(); + tokio::spawn(async move { + let response_message = rx_orchestrator.await; + + if let Ok((_, Ok(Ok(transactions)))) = response_message { + let txs: Vec> = tx_ids.iter().enumerate().map(|(i, _)| { + if i == 0 { + None + } else { + Some(NetworkableTransactionPool::Transaction(Transaction::default_test_tx())) + } + }).collect(); + let check = transactions.len() == tx_ids.len() && 
transactions.iter().zip(txs.iter()).all(|(a, b)| a == b); + let _ = tx_test_end.send(check).await; + } else { + tracing::error!("Orchestrator failed to receive a message: {:?}", response_message); + let _ = tx_test_end.send(false).await; + } + }); + } + } + } + } + } + + tracing::info!("Node A Event: {:?}", node_a_event); + }, + node_b_event = node_b.next_event() => { + // 2. Node B receives the RequestMessage from Node A initiated by the NetworkOrchestrator + if let Some(FuelP2PEvent::InboundRequestMessage{ request_id, request_message: received_request_message }) = &node_b_event { + match received_request_message { + RequestMessage::SealedHeaders(range) => { + let sealed_headers: Vec<_> = arbitrary_headers_for_range(range.clone()); + + let _ = node_b.send_response_msg(*request_id, V2ResponseMessage::SealedHeaders(Ok(sealed_headers))); + } + RequestMessage::Transactions(_) => { + let txs = (0..5).map(|_| Transaction::default_test_tx()).collect(); + let transactions = vec![Transactions(txs)]; + let _ = node_b.send_response_msg(*request_id, V2ResponseMessage::Transactions(Ok(transactions))); + } + RequestMessage::TxPoolAllTransactionsIds => { + let tx_ids = (0..5).map(|_| Transaction::default_test_tx().id(&ChainId::new(1))).collect(); + let _ = node_b.send_response_msg(*request_id, V2ResponseMessage::TxPoolAllTransactionsIds(Ok(tx_ids))); + } + RequestMessage::TxPoolFullTransactions(tx_ids) => { + let txs = tx_ids.iter().enumerate().map(|(i, _)| { + if i == 0 { + None + } else { + Some(NetworkableTransactionPool::Transaction(Transaction::default_test_tx())) + } + }).collect(); + let _ = node_b.send_response_msg(*request_id, V2ResponseMessage::TxPoolFullTransactions(Ok(txs))); + } + } + } + + tracing::info!("Node B Event: {:?}", node_b_event); + } + }; + } +} + +#[tokio::test] +#[instrument] +async fn request_response_works_with_transactions() { + let arbitrary_range = 2..6; + request_response_works_with(RequestMessage::Transactions(arbitrary_range), None).await +} + 
+#[tokio::test] +#[instrument] +async fn request_response_works_with_sealed_headers_range_inclusive() { + let arbitrary_range = 2..6; + request_response_works_with(RequestMessage::SealedHeaders(arbitrary_range), None) + .await +} + +#[tokio::test] +#[instrument] +async fn request_response_works_with_transactions_ids() { + request_response_works_with(RequestMessage::TxPoolAllTransactionsIds, None).await +} + +#[tokio::test] +#[instrument] +async fn request_response_works_with_full_transactions() { + let tx_ids = (0..10) + .map(|_| Transaction::default_test_tx().id(&ChainId::new(1))) + .collect(); + request_response_works_with(RequestMessage::TxPoolFullTransactions(tx_ids), None) + .await +} + +/// We send a request for transactions, but it's responded by only headers +#[tokio::test] +#[instrument] +async fn invalid_response_type_is_detected() { + let mut p2p_config = Config::default_initialized("invalid_response_type_is_detected"); + + // Node A + let mut node_a = build_service_from_config(p2p_config.clone()).await; + + // Node B + p2p_config.bootstrap_nodes = node_a.multiaddrs(); + let mut node_b = build_service_from_config(p2p_config.clone()).await; + + let (tx_test_end, mut rx_test_end) = mpsc::channel::(1); + + let mut request_sent = false; + + loop { + tokio::select! { + message_sent = rx_test_end.recv() => { + // we received a signal to end the test + assert!(message_sent.unwrap(), "Received incorrect or missing message"); + break; + } + node_a_event = node_a.next_event() => { + if let Some(FuelP2PEvent::PeerInfoUpdated { peer_id, block_height: _ }) = node_a_event { + if node_a.peer_manager.get_peer_info(&peer_id).is_some() { + // 0. 
verifies that we've got at least a single peer address to request message from + if !request_sent { + request_sent = true; + + let (tx_orchestrator, rx_orchestrator) = oneshot::channel(); + assert!(node_a.send_request_msg(None, RequestMessage::Transactions(0..2), ResponseSender::Transactions(tx_orchestrator)).is_ok()); + let tx_test_end = tx_test_end.clone(); + + tokio::spawn(async move { + let response_message = rx_orchestrator.await; + + if let Ok(response) = response_message { + match response { + Ok((_, Ok(_))) => { + let _ = tx_test_end.send(false).await; + panic!("Request succeeded unexpectedly"); + }, + Ok((_, Err(ResponseError::TypeMismatch))) => { + // Got Invalid Response Type as expected, so end test + let _ = tx_test_end.send(true).await; + }, + Ok((_, Err(err))) => { + let _ = tx_test_end.send(false).await; + panic!("Unexpected error in P2P communication: {:?}", err); + }, + Err(e) => { + let _ = tx_test_end.send(false).await; + panic!("Error in P2P before sending message: {:?}", e); + }, + } + } else { + let _ = tx_test_end.send(false).await; + panic!("Orchestrator failed to receive a message: {:?}", response_message); + } + }); + } + } + } + + tracing::info!("Node A Event: {:?}", node_a_event); + }, + node_b_event = node_b.next_event() => { + // 2. 
Node B receives the RequestMessage from Node A initiated by the NetworkOrchestrator + if let Some(FuelP2PEvent::InboundRequestMessage{ request_id, request_message: _ }) = &node_b_event { + let sealed_headers: Vec<_> = arbitrary_headers_for_range(1..3); + let _ = node_b.send_response_msg(*request_id, V2ResponseMessage::SealedHeaders(Ok(sealed_headers))); + } + + tracing::info!("Node B Event: {:?}", node_b_event); + } + }; + } +} + +#[tokio::test] +#[instrument] +async fn req_res_outbound_timeout_works() { + let mut p2p_config = Config::default_initialized("req_res_outbound_timeout_works"); + + // Node A + // setup request timeout to 1ms in order for the Request to fail + p2p_config.set_request_timeout = Duration::from_millis(1); + + let mut node_a = build_service_from_config(p2p_config.clone()).await; + + // Node B + p2p_config.bootstrap_nodes = node_a.multiaddrs(); + p2p_config.set_request_timeout = Duration::from_secs(20); + let mut node_b = build_service_from_config(p2p_config.clone()).await; + + let (tx_test_end, mut rx_test_end) = tokio::sync::mpsc::channel(1); + + // track the request sent in order to avoid duplicate sending + let mut request_sent = false; + + loop { + tokio::select! { + node_a_event = node_a.next_event() => { + if let Some(FuelP2PEvent::PeerInfoUpdated { peer_id, block_height: _ }) = node_a_event { + if node_a.peer_manager.get_peer_info(&peer_id).is_some() { + // 0. verifies that we've got at least a single peer address to request message from + if !request_sent { + request_sent = true; + + // 1. Simulating Oneshot channel from the NetworkOrchestrator + let (tx_orchestrator, rx_orchestrator) = oneshot::channel(); + + // 2a. 
there should be ZERO pending outbound requests in the table + assert_eq!(node_a.outbound_requests_table.len(), 0); + + // Request successfully sent + let requested_block_height = RequestMessage::SealedHeaders(0..0); + assert!(node_a.send_request_msg(None, requested_block_height, ResponseSender::SealedHeaders(tx_orchestrator)).is_ok()); + + // 2b. there should be ONE pending outbound requests in the table + assert_eq!(node_a.outbound_requests_table.len(), 1); + + let tx_test_end = tx_test_end.clone(); + + tokio::spawn(async move { + // 3. Simulating NetworkOrchestrator receiving a Timeout Error Message! + let response_message = rx_orchestrator.await; + if let Ok(response) = response_message { + match response { + Ok((_, Ok(_))) => { + let _ = tx_test_end.send(false).await; + panic!("Request succeeded unexpectedly"); + }, + Ok((_, Err(ResponseError::P2P(_)))) => { + // Got Invalid Response Type as expected, so end test + let _ = tx_test_end.send(true).await; + }, + Ok((_, Err(err))) => { + let _ = tx_test_end.send(false).await; + panic!("Unexpected error in P2P communication: {:?}", err); + }, + Err(e) => { + let _ = tx_test_end.send(false).await; + panic!("Error in P2P before sending message: {:?}", e); + }, + } + } else { + let _ = tx_test_end.send(false).await; + panic!("Orchestrator failed to receive a message: {:?}", response_message); + } + }); + } + } + } + + tracing::info!("Node A Event: {:?}", node_a_event); + }, + recv = rx_test_end.recv() => { + assert_eq!(recv, Some(true), "Test failed"); + // we received a signal to end the test + // 4. 
there should be ZERO pending outbound requests in the table + // after the Outbound Request Failed with Timeout + assert_eq!(node_a.outbound_requests_table.len(), 0); + break; + }, + // will not receive the request at all + node_b_event = node_b.next_event() => { + tracing::info!("Node B Event: {:?}", node_b_event); + } + }; + } +} + +#[tokio::test] +async fn gossipsub_peer_limit_works() { + tokio::time::timeout( + Duration::from_secs(5), + gossipsub_broadcast( + GossipsubBroadcastRequest::NewTx(Arc::new( + Transaction::default_test_tx(), + )), + GossipsubMessageAcceptance::Accept, + Some(1) // limit to 1 peer, therefore the function will timeout, as it will not be able to propagate the message + ), + ) + .await.expect_err("Should have timed out"); +} + +#[tokio::test] +async fn request_response_peer_limit_works() { + let handle = tokio::spawn(async { + let arbitrary_range = 2..6; + + request_response_works_with( + RequestMessage::Transactions(arbitrary_range), + Some(0), // limit to 0 peers, + ) + .await; + }); + + let result = handle.await; + assert!(result.is_err()); +} diff --git a/crates/services/p2p/src/service.rs b/crates/services/p2p/src/service.rs index 1b70e704d99..6d88dfc3aba 100644 --- a/crates/services/p2p/src/service.rs +++ b/crates/services/p2p/src/service.rs @@ -106,6 +106,12 @@ use tokio::{ }; use tracing::warn; +#[cfg(test)] +pub mod task_tests; + +#[cfg(test)] +pub mod broadcast_tests; + const CHANNEL_SIZE: usize = 1024 * 10; pub type Service = ServiceRunner>; @@ -403,9 +409,10 @@ impl Broadcast for SharedState { fn confirmations_broadcast( &self, - _confirmations: ConfirmationsGossipData, + confirmations: ConfirmationsGossipData, ) -> anyhow::Result<()> { - todo!(); + self.confirmations_broadcast.send(confirmations)?; + Ok(()) } fn new_tx_subscription_broadcast(&self, peer_id: FuelPeerId) -> anyhow::Result<()> { @@ -1078,6 +1085,8 @@ pub struct SharedState { new_tx_subscription_broadcast: broadcast::Sender, /// Sender of p2p transaction used for 
subscribing. tx_broadcast: broadcast::Sender, + /// Sender of p2p tx confirmations used for subscribing. + confirmations_broadcast: broadcast::Sender, /// Sender of reserved peers connection updates. reserved_peers_broadcast: broadcast::Sender, /// Used for communicating with the `Task`. @@ -1307,6 +1316,12 @@ impl SharedState { self.tx_broadcast.subscribe() } + pub fn subscribe_confirmations( + &self, + ) -> broadcast::Receiver { + self.confirmations_broadcast.subscribe() + } + pub fn subscribe_block_height( &self, ) -> broadcast::Receiver { @@ -1349,6 +1364,7 @@ pub fn build_shared_state( ) -> (SharedState, Receiver) { let (request_sender, request_receiver) = mpsc::channel(CHANNEL_SIZE); let (tx_broadcast, _) = broadcast::channel(CHANNEL_SIZE); + let (confirmations_broadcast, _) = broadcast::channel(CHANNEL_SIZE); let (new_tx_subscription_broadcast, _) = broadcast::channel(CHANNEL_SIZE); let (block_height_broadcast, _) = broadcast::channel(CHANNEL_SIZE); @@ -1365,6 +1381,7 @@ pub fn build_shared_state( request_sender, new_tx_subscription_broadcast, tx_broadcast, + confirmations_broadcast, reserved_peers_broadcast, block_height_broadcast, max_txs_per_request: config.max_txs_per_request, @@ -1433,587 +1450,3 @@ fn report_message( warn!(target: "fuel-p2p", "Failed to read PeerId from received GossipsubMessageId: {}", msg_id); } } - -#[cfg(test)] -pub mod tests { - #![allow(non_snake_case)] - use crate::ports::P2pDb; - - use super::*; - - use crate::{ - gossipsub::topics::TX_CONFIRMATIONS_GOSSIP_TOPIC, - peer_manager::heartbeat_data::HeartbeatData, - }; - use fuel_core_services::{ - Service, - State, - }; - use fuel_core_storage::Result as StorageResult; - use fuel_core_types::{ - blockchain::consensus::Genesis, - fuel_types::BlockHeight, - services::p2p::TxConfirmations, - }; - use futures::FutureExt; - use libp2p::gossipsub::TopicHash; - use std::{ - collections::VecDeque, - time::SystemTime, - }; - - #[derive(Clone, Debug)] - struct FakeDb; - - impl AtomicView for 
FakeDb { - type LatestView = Self; - - fn latest_view(&self) -> StorageResult { - Ok(self.clone()) - } - } - - impl P2pDb for FakeDb { - fn get_sealed_headers( - &self, - _block_height_range: Range, - ) -> StorageResult>> { - unimplemented!() - } - - fn get_transactions( - &self, - _block_height_range: Range, - ) -> StorageResult>> { - unimplemented!() - } - - fn get_genesis(&self) -> StorageResult { - Ok(Default::default()) - } - } - - #[derive(Clone, Debug)] - struct FakeBlockImporter; - - impl BlockHeightImporter for FakeBlockImporter { - fn next_block_height(&self) -> BoxStream { - Box::pin(fuel_core_services::stream::pending()) - } - } - - #[derive(Clone, Debug)] - struct FakeTxPool; - - impl TxPool for FakeTxPool { - async fn get_tx_ids( - &self, - _max_txs: usize, - ) -> anyhow::Result> { - Ok(vec![]) - } - - async fn get_full_txs( - &self, - tx_ids: Vec, - ) -> anyhow::Result>> { - Ok(tx_ids.iter().map(|_| None).collect()) - } - } - - #[tokio::test] - async fn start_and_stop_awaits_works() { - let p2p_config = Config::::default("start_stop_works"); - let (shared_state, request_receiver) = build_shared_state(p2p_config.clone()); - let service = new_service( - ChainId::default(), - 0.into(), - p2p_config, - shared_state, - request_receiver, - FakeDb, - FakeBlockImporter, - FakeTxPool, - ); - - // Node with p2p service started - assert!(service.start_and_await().await.unwrap().started()); - // Node with p2p service stopped - assert!(service.stop_and_await().await.unwrap().stopped()); - } - - struct FakeP2PService { - peer_info: Vec<(PeerId, PeerInfo)>, - next_event_stream: BoxStream, - } - - impl TaskP2PService for FakeP2PService { - fn update_metrics(&self, _: T) - where - T: FnOnce(), - { - unimplemented!() - } - - fn get_all_peer_info(&self) -> Vec<(&PeerId, &PeerInfo)> { - self.peer_info.iter().map(|tup| (&tup.0, &tup.1)).collect() - } - - fn get_peer_id_with_height(&self, _height: &BlockHeight) -> Option { - todo!() - } - - fn next_event(&mut self) -> 
BoxFuture<'_, Option> { - self.next_event_stream.next().boxed() - } - - fn publish_message( - &mut self, - _message: GossipsubBroadcastRequest, - ) -> anyhow::Result<()> { - todo!() - } - - fn send_request_msg( - &mut self, - _peer_id: Option, - _request_msg: RequestMessage, - _on_response: ResponseSender, - ) -> anyhow::Result<()> { - todo!() - } - - fn send_response_msg( - &mut self, - _request_id: InboundRequestId, - _message: V2ResponseMessage, - ) -> anyhow::Result<()> { - todo!() - } - - fn report_message( - &mut self, - _message: GossipsubMessageInfo, - _acceptance: GossipsubMessageAcceptance, - ) -> anyhow::Result<()> { - todo!() - } - - fn report_peer( - &mut self, - _peer_id: PeerId, - _score: AppScore, - _reporting_service: &str, - ) -> anyhow::Result<()> { - todo!() - } - - fn update_block_height(&mut self, _height: BlockHeight) -> anyhow::Result<()> { - Ok(()) - } - } - - #[derive(Clone)] - struct FakeDB; - - impl AtomicView for FakeDB { - type LatestView = Self; - - fn latest_view(&self) -> StorageResult { - Ok(self.clone()) - } - } - - impl P2pDb for FakeDB { - fn get_sealed_headers( - &self, - _block_height_range: Range, - ) -> StorageResult>> { - todo!() - } - - fn get_transactions( - &self, - _block_height_range: Range, - ) -> StorageResult>> { - todo!() - } - - fn get_genesis(&self) -> StorageResult { - todo!() - } - } - - struct FakeBroadcast { - pub peer_reports: mpsc::Sender<(FuelPeerId, AppScore, String)>, - pub confirmation_gossip_broadcast: mpsc::Sender, - } - - impl Broadcast for FakeBroadcast { - fn report_peer( - &self, - peer_id: FuelPeerId, - report: AppScore, - reporting_service: &'static str, - ) -> anyhow::Result<()> { - self.peer_reports.try_send(( - peer_id, - report, - reporting_service.to_string(), - ))?; - Ok(()) - } - - fn block_height_broadcast( - &self, - _block_height_data: BlockHeightHeartbeatData, - ) -> anyhow::Result<()> { - todo!() - } - - fn tx_broadcast( - &self, - _transaction: TransactionGossipData, - ) -> 
anyhow::Result<()> { - todo!() - } - - fn confirmations_broadcast( - &self, - confirmations: ConfirmationsGossipData, - ) -> anyhow::Result<()> { - self.confirmation_gossip_broadcast.try_send(confirmations)?; - Ok(()) - } - - fn new_tx_subscription_broadcast( - &self, - _peer_id: FuelPeerId, - ) -> anyhow::Result<()> { - todo!() - } - } - - #[tokio::test] - async fn peer_heartbeat_reputation_checks__slow_heartbeat_sends_reports() { - // given - let peer_id = PeerId::random(); - // more than limit - let last_duration = Duration::from_secs(30); - let mut durations = VecDeque::new(); - durations.push_front(last_duration); - - let heartbeat_data = HeartbeatData { - block_height: None, - last_heartbeat: Instant::now(), - last_heartbeat_sys: SystemTime::now(), - window: 0, - durations, - }; - let peer_info = PeerInfo { - peer_addresses: Default::default(), - client_version: None, - heartbeat_data, - score: 100.0, - }; - let peer_info = vec![(peer_id, peer_info)]; - let p2p_service = FakeP2PService { - peer_info, - next_event_stream: Box::pin(futures::stream::pending()), - }; - let (request_sender, request_receiver) = mpsc::channel(100); - - let (report_sender, mut report_receiver) = mpsc::channel(100); - let broadcast = FakeBroadcast { - peer_reports: report_sender, - confirmation_gossip_broadcast: mpsc::channel(100).0, - }; - - // Less than actual - let heartbeat_max_avg_interval = Duration::from_secs(20); - // Greater than actual - let heartbeat_max_time_since_last = Duration::from_secs(40); - - // Arbitrary values - let heartbeat_peer_reputation_config = HeartbeatPeerReputationConfig { - old_heartbeat_penalty: 5.6, - low_heartbeat_frequency_penalty: 20.45, - }; - - let mut task = Task { - chain_id: Default::default(), - response_timeout: Default::default(), - p2p_service, - view_provider: FakeDB, - next_block_height: FakeBlockImporter.next_block_height(), - tx_pool: FakeTxPool, - request_receiver, - request_sender, - db_heavy_task_processor: SyncProcessor::new("Test", 
1, 1).unwrap(), - tx_pool_heavy_task_processor: AsyncProcessor::new("Test", 1, 1).unwrap(), - broadcast, - max_headers_per_request: 0, - max_txs_per_request: 100, - heartbeat_check_interval: Duration::from_secs(0), - heartbeat_max_avg_interval, - heartbeat_max_time_since_last, - next_check_time: Instant::now(), - heartbeat_peer_reputation_config: heartbeat_peer_reputation_config.clone(), - cached_view: Arc::new(CachedView::new(100, false)), - }; - let (watch_sender, watch_receiver) = tokio::sync::watch::channel(State::Started); - let mut watcher = StateWatcher::from(watch_receiver); - - // when - let (report_peer_id, report, reporting_service) = tokio::time::timeout( - Duration::from_secs(1), - wait_until_report_received(&mut report_receiver, &mut task, &mut watcher), - ) - .await - .unwrap(); - - // then - watch_sender.send(State::Stopped).unwrap(); - - assert_eq!( - FuelPeerId::from(peer_id.to_bytes().to_vec()), - report_peer_id - ); - assert_eq!( - report, - heartbeat_peer_reputation_config.low_heartbeat_frequency_penalty - ); - assert_eq!(reporting_service, "p2p"); - } - - #[tokio::test] - async fn peer_heartbeat_reputation_checks__old_heartbeat_sends_reports() { - // given - let peer_id = PeerId::random(); - // under the limit - let last_duration = Duration::from_secs(5); - let last_heartbeat = Instant::now() - Duration::from_secs(50); - let last_heartbeat_sys = SystemTime::now() - Duration::from_secs(50); - let mut durations = VecDeque::new(); - durations.push_front(last_duration); - - let heartbeat_data = HeartbeatData { - block_height: None, - last_heartbeat, - last_heartbeat_sys, - window: 0, - durations, - }; - let peer_info = PeerInfo { - peer_addresses: Default::default(), - client_version: None, - heartbeat_data, - score: 100.0, - }; - let peer_info = vec![(peer_id, peer_info)]; - let p2p_service = FakeP2PService { - peer_info, - next_event_stream: Box::pin(futures::stream::pending()), - }; - let (request_sender, request_receiver) = mpsc::channel(100); 
- - let (report_sender, mut report_receiver) = mpsc::channel(100); - let broadcast = FakeBroadcast { - peer_reports: report_sender, - confirmation_gossip_broadcast: mpsc::channel(100).0, - }; - - // Greater than actual - let heartbeat_max_avg_interval = Duration::from_secs(20); - // Less than actual - let heartbeat_max_time_since_last = Duration::from_secs(40); - - // Arbitrary values - let heartbeat_peer_reputation_config = HeartbeatPeerReputationConfig { - old_heartbeat_penalty: 5.6, - low_heartbeat_frequency_penalty: 20.45, - }; - - let mut task = Task { - chain_id: Default::default(), - response_timeout: Default::default(), - p2p_service, - view_provider: FakeDB, - tx_pool: FakeTxPool, - next_block_height: FakeBlockImporter.next_block_height(), - request_receiver, - request_sender, - db_heavy_task_processor: SyncProcessor::new("Test", 1, 1).unwrap(), - tx_pool_heavy_task_processor: AsyncProcessor::new("Test", 1, 1).unwrap(), - broadcast, - max_headers_per_request: 0, - max_txs_per_request: 100, - heartbeat_check_interval: Duration::from_secs(0), - heartbeat_max_avg_interval, - heartbeat_max_time_since_last, - next_check_time: Instant::now(), - heartbeat_peer_reputation_config: heartbeat_peer_reputation_config.clone(), - cached_view: Arc::new(CachedView::new(100, false)), - }; - let (watch_sender, watch_receiver) = tokio::sync::watch::channel(State::Started); - let mut watcher = StateWatcher::from(watch_receiver); - - // when - // we run this in a loop to ensure that the task is run until it reports - let (report_peer_id, report, reporting_service) = tokio::time::timeout( - Duration::from_secs(1), - wait_until_report_received(&mut report_receiver, &mut task, &mut watcher), - ) - .await - .unwrap(); - - // then - watch_sender.send(State::Stopped).unwrap(); - - assert_eq!( - FuelPeerId::from(peer_id.to_bytes().to_vec()), - report_peer_id - ); - assert_eq!( - report, - heartbeat_peer_reputation_config.old_heartbeat_penalty - ); - assert_eq!(reporting_service, 
"p2p"); - } - - async fn wait_until_report_received( - report_receiver: &mut Receiver<(FuelPeerId, AppScore, String)>, - task: &mut Task, - watcher: &mut StateWatcher, - ) -> (FuelPeerId, AppScore, String) { - loop { - task.run(watcher).await; - if let Ok((peer_id, recv_report, service)) = report_receiver.try_recv() { - return (peer_id, recv_report, service); - } - } - } - - #[tokio::test] - async fn should_process_all_imported_block_under_infinite_events_from_p2p() { - // Given - let (blocks_processed_sender, mut block_processed_receiver) = mpsc::channel(1); - let next_block_height = Box::pin(futures::stream::repeat_with(move || { - blocks_processed_sender.try_send(()).unwrap(); - BlockHeight::from(0) - })); - let infinite_event_stream = Box::pin(futures::stream::empty()); - let p2p_service = FakeP2PService { - peer_info: vec![], - next_event_stream: infinite_event_stream, - }; - - // Initialization - let (request_sender, request_receiver) = mpsc::channel(100); - let broadcast = FakeBroadcast { - peer_reports: mpsc::channel(100).0, - confirmation_gossip_broadcast: mpsc::channel(100).0, - }; - let mut task = Task { - chain_id: Default::default(), - response_timeout: Default::default(), - p2p_service, - tx_pool: FakeTxPool, - view_provider: FakeDB, - next_block_height, - request_receiver, - request_sender, - db_heavy_task_processor: SyncProcessor::new("Test", 1, 1).unwrap(), - tx_pool_heavy_task_processor: AsyncProcessor::new("Test", 1, 1).unwrap(), - broadcast, - max_headers_per_request: 0, - max_txs_per_request: 100, - heartbeat_check_interval: Duration::from_secs(0), - heartbeat_max_avg_interval: Default::default(), - heartbeat_max_time_since_last: Default::default(), - next_check_time: Instant::now(), - heartbeat_peer_reputation_config: Default::default(), - cached_view: Arc::new(CachedView::new(100, false)), - }; - let mut watcher = StateWatcher::started(); - // End of initialization - - for _ in 0..100 { - // When - task.run(&mut watcher).await; - - // Then - 
block_processed_receiver - .try_recv() - .expect("Should process the block height even under p2p pressure"); - } - } - - fn arb_tx_confirmation_gossip_message() -> FuelP2PEvent { - let peer_id = PeerId::random(); - let message_id = vec![1, 2, 3, 4, 5].into(); - let topic_hash = TopicHash::from_raw(TX_CONFIRMATIONS_GOSSIP_TOPIC); - let confirmations = TxConfirmations::default_test_tx(); - let message = GossipsubMessage::Confirmations(confirmations); - FuelP2PEvent::GossipsubMessage { - peer_id, - message_id, - topic_hash, - message, - } - } - - #[tokio::test] - async fn run__gossip_message_from_p2p_service_is_broadcasted__tx_confirmations() { - // given - let gossip_message_event = arb_tx_confirmation_gossip_message(); - let events = vec![gossip_message_event.clone()]; - let event_stream = futures::stream::iter(events); - let p2p_service = FakeP2PService { - peer_info: vec![], - next_event_stream: Box::pin(event_stream), - }; - let (confirmations_sender, mut confirmations_receiver) = mpsc::channel(100); - let broadcast = FakeBroadcast { - peer_reports: mpsc::channel(100).0, - confirmation_gossip_broadcast: confirmations_sender, - }; - let (request_sender, request_receiver) = mpsc::channel(100); - let mut task = Task { - chain_id: Default::default(), - response_timeout: Default::default(), - p2p_service, - view_provider: FakeDB, - next_block_height: FakeBlockImporter.next_block_height(), - tx_pool: FakeTxPool, - request_receiver, - request_sender, - db_heavy_task_processor: SyncProcessor::new("Test", 1, 1).unwrap(), - tx_pool_heavy_task_processor: AsyncProcessor::new("Test", 1, 1).unwrap(), - broadcast, - max_headers_per_request: 0, - max_txs_per_request: 100, - heartbeat_check_interval: Duration::from_secs(0), - heartbeat_max_avg_interval: Default::default(), - heartbeat_max_time_since_last: Default::default(), - next_check_time: Instant::now(), - heartbeat_peer_reputation_config: Default::default(), - cached_view: Arc::new(CachedView::new(100, false)), - }; - - // 
when - let mut watcher = StateWatcher::started(); - task.run(&mut watcher).await; - tokio::time::sleep(Duration::from_millis(100)).await; - - // then - let actual = confirmations_receiver.try_recv().unwrap().data.unwrap(); - let FuelP2PEvent::GossipsubMessage { message, .. } = gossip_message_event else { - panic!("Expected GossipsubMessage event"); - }; - let GossipsubMessage::Confirmations(expected) = message else { - panic!("Expected Confirmations message"); - }; - assert_eq!(expected, actual); - } -} diff --git a/crates/services/p2p/src/service/broadcast_tests.rs b/crates/services/p2p/src/service/broadcast_tests.rs new file mode 100644 index 00000000000..77ad54503d6 --- /dev/null +++ b/crates/services/p2p/src/service/broadcast_tests.rs @@ -0,0 +1,32 @@ +#![allow(non_snake_case)] + +use super::*; +use fuel_core_types::services::p2p::TxConfirmations; + +fn arb_shared_state() -> SharedState { + let config = Config::default("test network"); + let (shared_state, _) = build_shared_state(config); + shared_state +} + +#[tokio::test] +async fn shared_state__broadcast__tx_confirmations() { + // given + let broadcast = arb_shared_state(); + let confirmations = TxConfirmations::default_test_tx(); + let confirmations_gossip_data = ConfirmationsGossipData { + data: Some(confirmations.clone()), + peer_id: FuelPeerId::from(PeerId::random().to_bytes().to_vec()), + message_id: vec![1, 2, 3, 4], + }; + let mut confirmations_receiver = broadcast.subscribe_confirmations(); + + // when + broadcast + .confirmations_broadcast(confirmations_gossip_data) + .unwrap(); + + // then + let actual = confirmations_receiver.try_recv().unwrap().data.unwrap(); + assert_eq!(confirmations, actual); +} diff --git a/crates/services/p2p/src/service/task_tests.rs b/crates/services/p2p/src/service/task_tests.rs new file mode 100644 index 00000000000..1821975d334 --- /dev/null +++ b/crates/services/p2p/src/service/task_tests.rs @@ -0,0 +1,571 @@ +#![allow(non_snake_case)] +use crate::ports::P2pDb; + +use 
super::*; + +use crate::{ + gossipsub::topics::TX_CONFIRMATIONS_GOSSIP_TOPIC, + peer_manager::heartbeat_data::HeartbeatData, +}; +use fuel_core_services::{ + Service, + State, +}; +use fuel_core_storage::Result as StorageResult; +use fuel_core_types::{ + blockchain::consensus::Genesis, + fuel_types::BlockHeight, + services::p2p::TxConfirmations, +}; +use futures::FutureExt; +use libp2p::gossipsub::TopicHash; +use std::{ + collections::VecDeque, + time::SystemTime, +}; + +#[derive(Clone, Debug)] +struct FakeDb; + +impl AtomicView for FakeDb { + type LatestView = Self; + + fn latest_view(&self) -> StorageResult { + Ok(self.clone()) + } +} + +impl P2pDb for FakeDb { + fn get_sealed_headers( + &self, + _block_height_range: Range, + ) -> StorageResult>> { + unimplemented!() + } + + fn get_transactions( + &self, + _block_height_range: Range, + ) -> StorageResult>> { + unimplemented!() + } + + fn get_genesis(&self) -> StorageResult { + Ok(Default::default()) + } +} + +#[derive(Clone, Debug)] +struct FakeBlockImporter; + +impl BlockHeightImporter for FakeBlockImporter { + fn next_block_height(&self) -> BoxStream { + Box::pin(fuel_core_services::stream::pending()) + } +} + +#[derive(Clone, Debug)] +struct FakeTxPool; + +impl TxPool for FakeTxPool { + async fn get_tx_ids( + &self, + _max_txs: usize, + ) -> anyhow::Result> { + Ok(vec![]) + } + + async fn get_full_txs( + &self, + tx_ids: Vec, + ) -> anyhow::Result>> { + Ok(tx_ids.iter().map(|_| None).collect()) + } +} + +#[tokio::test] +async fn start_and_stop_awaits_works() { + let p2p_config = Config::::default("start_stop_works"); + let (shared_state, request_receiver) = build_shared_state(p2p_config.clone()); + let service = new_service( + ChainId::default(), + 0.into(), + p2p_config, + shared_state, + request_receiver, + FakeDb, + FakeBlockImporter, + FakeTxPool, + ); + + // Node with p2p service started + assert!(service.start_and_await().await.unwrap().started()); + // Node with p2p service stopped + 
assert!(service.stop_and_await().await.unwrap().stopped()); +} + +struct FakeP2PService { + peer_info: Vec<(PeerId, PeerInfo)>, + next_event_stream: BoxStream, +} + +impl TaskP2PService for FakeP2PService { + fn update_metrics(&self, _: T) + where + T: FnOnce(), + { + unimplemented!() + } + + fn get_all_peer_info(&self) -> Vec<(&PeerId, &PeerInfo)> { + self.peer_info.iter().map(|tup| (&tup.0, &tup.1)).collect() + } + + fn get_peer_id_with_height(&self, _height: &BlockHeight) -> Option { + todo!() + } + + fn next_event(&mut self) -> BoxFuture<'_, Option> { + self.next_event_stream.next().boxed() + } + + fn publish_message( + &mut self, + _message: GossipsubBroadcastRequest, + ) -> anyhow::Result<()> { + todo!() + } + + fn send_request_msg( + &mut self, + _peer_id: Option, + _request_msg: RequestMessage, + _on_response: ResponseSender, + ) -> anyhow::Result<()> { + todo!() + } + + fn send_response_msg( + &mut self, + _request_id: InboundRequestId, + _message: V2ResponseMessage, + ) -> anyhow::Result<()> { + todo!() + } + + fn report_message( + &mut self, + _message: GossipsubMessageInfo, + _acceptance: GossipsubMessageAcceptance, + ) -> anyhow::Result<()> { + todo!() + } + + fn report_peer( + &mut self, + _peer_id: PeerId, + _score: AppScore, + _reporting_service: &str, + ) -> anyhow::Result<()> { + todo!() + } + + fn update_block_height(&mut self, _height: BlockHeight) -> anyhow::Result<()> { + Ok(()) + } +} + +#[derive(Clone)] +struct FakeDB; + +impl AtomicView for FakeDB { + type LatestView = Self; + + fn latest_view(&self) -> StorageResult { + Ok(self.clone()) + } +} + +impl P2pDb for FakeDB { + fn get_sealed_headers( + &self, + _block_height_range: Range, + ) -> StorageResult>> { + todo!() + } + + fn get_transactions( + &self, + _block_height_range: Range, + ) -> StorageResult>> { + todo!() + } + + fn get_genesis(&self) -> StorageResult { + todo!() + } +} + +struct FakeBroadcast { + pub peer_reports: mpsc::Sender<(FuelPeerId, AppScore, String)>, + pub 
confirmation_gossip_broadcast: mpsc::Sender, +} + +impl Broadcast for FakeBroadcast { + fn report_peer( + &self, + peer_id: FuelPeerId, + report: AppScore, + reporting_service: &'static str, + ) -> anyhow::Result<()> { + self.peer_reports + .try_send((peer_id, report, reporting_service.to_string()))?; + Ok(()) + } + + fn block_height_broadcast( + &self, + _block_height_data: BlockHeightHeartbeatData, + ) -> anyhow::Result<()> { + todo!() + } + + fn tx_broadcast(&self, _transaction: TransactionGossipData) -> anyhow::Result<()> { + todo!() + } + + fn confirmations_broadcast( + &self, + confirmations: ConfirmationsGossipData, + ) -> anyhow::Result<()> { + self.confirmation_gossip_broadcast.try_send(confirmations)?; + Ok(()) + } + + fn new_tx_subscription_broadcast(&self, _peer_id: FuelPeerId) -> anyhow::Result<()> { + todo!() + } +} + +#[tokio::test] +async fn peer_heartbeat_reputation_checks__slow_heartbeat_sends_reports() { + // given + let peer_id = PeerId::random(); + // more than limit + let last_duration = Duration::from_secs(30); + let mut durations = VecDeque::new(); + durations.push_front(last_duration); + + let heartbeat_data = HeartbeatData { + block_height: None, + last_heartbeat: Instant::now(), + last_heartbeat_sys: SystemTime::now(), + window: 0, + durations, + }; + let peer_info = PeerInfo { + peer_addresses: Default::default(), + client_version: None, + heartbeat_data, + score: 100.0, + }; + let peer_info = vec![(peer_id, peer_info)]; + let p2p_service = FakeP2PService { + peer_info, + next_event_stream: Box::pin(futures::stream::pending()), + }; + let (request_sender, request_receiver) = mpsc::channel(100); + + let (report_sender, mut report_receiver) = mpsc::channel(100); + let broadcast = FakeBroadcast { + peer_reports: report_sender, + confirmation_gossip_broadcast: mpsc::channel(100).0, + }; + + // Less than actual + let heartbeat_max_avg_interval = Duration::from_secs(20); + // Greater than actual + let heartbeat_max_time_since_last = 
Duration::from_secs(40); + + // Arbitrary values + let heartbeat_peer_reputation_config = HeartbeatPeerReputationConfig { + old_heartbeat_penalty: 5.6, + low_heartbeat_frequency_penalty: 20.45, + }; + + let mut task = Task { + chain_id: Default::default(), + response_timeout: Default::default(), + p2p_service, + view_provider: FakeDB, + next_block_height: FakeBlockImporter.next_block_height(), + tx_pool: FakeTxPool, + request_receiver, + request_sender, + db_heavy_task_processor: SyncProcessor::new("Test", 1, 1).unwrap(), + tx_pool_heavy_task_processor: AsyncProcessor::new("Test", 1, 1).unwrap(), + broadcast, + max_headers_per_request: 0, + max_txs_per_request: 100, + heartbeat_check_interval: Duration::from_secs(0), + heartbeat_max_avg_interval, + heartbeat_max_time_since_last, + next_check_time: Instant::now(), + heartbeat_peer_reputation_config: heartbeat_peer_reputation_config.clone(), + cached_view: Arc::new(CachedView::new(100, false)), + }; + let (watch_sender, watch_receiver) = tokio::sync::watch::channel(State::Started); + let mut watcher = StateWatcher::from(watch_receiver); + + // when + let (report_peer_id, report, reporting_service) = tokio::time::timeout( + Duration::from_secs(1), + wait_until_report_received(&mut report_receiver, &mut task, &mut watcher), + ) + .await + .unwrap(); + + // then + watch_sender.send(State::Stopped).unwrap(); + + assert_eq!( + FuelPeerId::from(peer_id.to_bytes().to_vec()), + report_peer_id + ); + assert_eq!( + report, + heartbeat_peer_reputation_config.low_heartbeat_frequency_penalty + ); + assert_eq!(reporting_service, "p2p"); +} + +#[tokio::test] +async fn peer_heartbeat_reputation_checks__old_heartbeat_sends_reports() { + // given + let peer_id = PeerId::random(); + // under the limit + let last_duration = Duration::from_secs(5); + let last_heartbeat = Instant::now() - Duration::from_secs(50); + let last_heartbeat_sys = SystemTime::now() - Duration::from_secs(50); + let mut durations = VecDeque::new(); + 
durations.push_front(last_duration); + + let heartbeat_data = HeartbeatData { + block_height: None, + last_heartbeat, + last_heartbeat_sys, + window: 0, + durations, + }; + let peer_info = PeerInfo { + peer_addresses: Default::default(), + client_version: None, + heartbeat_data, + score: 100.0, + }; + let peer_info = vec![(peer_id, peer_info)]; + let p2p_service = FakeP2PService { + peer_info, + next_event_stream: Box::pin(futures::stream::pending()), + }; + let (request_sender, request_receiver) = mpsc::channel(100); + + let (report_sender, mut report_receiver) = mpsc::channel(100); + let broadcast = FakeBroadcast { + peer_reports: report_sender, + confirmation_gossip_broadcast: mpsc::channel(100).0, + }; + + // Greater than actual + let heartbeat_max_avg_interval = Duration::from_secs(20); + // Less than actual + let heartbeat_max_time_since_last = Duration::from_secs(40); + + // Arbitrary values + let heartbeat_peer_reputation_config = HeartbeatPeerReputationConfig { + old_heartbeat_penalty: 5.6, + low_heartbeat_frequency_penalty: 20.45, + }; + + let mut task = Task { + chain_id: Default::default(), + response_timeout: Default::default(), + p2p_service, + view_provider: FakeDB, + tx_pool: FakeTxPool, + next_block_height: FakeBlockImporter.next_block_height(), + request_receiver, + request_sender, + db_heavy_task_processor: SyncProcessor::new("Test", 1, 1).unwrap(), + tx_pool_heavy_task_processor: AsyncProcessor::new("Test", 1, 1).unwrap(), + broadcast, + max_headers_per_request: 0, + max_txs_per_request: 100, + heartbeat_check_interval: Duration::from_secs(0), + heartbeat_max_avg_interval, + heartbeat_max_time_since_last, + next_check_time: Instant::now(), + heartbeat_peer_reputation_config: heartbeat_peer_reputation_config.clone(), + cached_view: Arc::new(CachedView::new(100, false)), + }; + let (watch_sender, watch_receiver) = tokio::sync::watch::channel(State::Started); + let mut watcher = StateWatcher::from(watch_receiver); + + // when + // we run this in a 
loop to ensure that the task is run until it reports + let (report_peer_id, report, reporting_service) = tokio::time::timeout( + Duration::from_secs(1), + wait_until_report_received(&mut report_receiver, &mut task, &mut watcher), + ) + .await + .unwrap(); + + // then + watch_sender.send(State::Stopped).unwrap(); + + assert_eq!( + FuelPeerId::from(peer_id.to_bytes().to_vec()), + report_peer_id + ); + assert_eq!( + report, + heartbeat_peer_reputation_config.old_heartbeat_penalty + ); + assert_eq!(reporting_service, "p2p"); +} + +async fn wait_until_report_received( + report_receiver: &mut Receiver<(FuelPeerId, AppScore, String)>, + task: &mut Task, + watcher: &mut StateWatcher, +) -> (FuelPeerId, AppScore, String) { + loop { + task.run(watcher).await; + if let Ok((peer_id, recv_report, service)) = report_receiver.try_recv() { + return (peer_id, recv_report, service); + } + } +} + +#[tokio::test] +async fn should_process_all_imported_block_under_infinite_events_from_p2p() { + // Given + let (blocks_processed_sender, mut block_processed_receiver) = mpsc::channel(1); + let next_block_height = Box::pin(futures::stream::repeat_with(move || { + blocks_processed_sender.try_send(()).unwrap(); + BlockHeight::from(0) + })); + let infinite_event_stream = Box::pin(futures::stream::empty()); + let p2p_service = FakeP2PService { + peer_info: vec![], + next_event_stream: infinite_event_stream, + }; + + // Initialization + let (request_sender, request_receiver) = mpsc::channel(100); + let broadcast = FakeBroadcast { + peer_reports: mpsc::channel(100).0, + confirmation_gossip_broadcast: mpsc::channel(100).0, + }; + let mut task = Task { + chain_id: Default::default(), + response_timeout: Default::default(), + p2p_service, + tx_pool: FakeTxPool, + view_provider: FakeDB, + next_block_height, + request_receiver, + request_sender, + db_heavy_task_processor: SyncProcessor::new("Test", 1, 1).unwrap(), + tx_pool_heavy_task_processor: AsyncProcessor::new("Test", 1, 1).unwrap(), + broadcast, 
+ max_headers_per_request: 0, + max_txs_per_request: 100, + heartbeat_check_interval: Duration::from_secs(0), + heartbeat_max_avg_interval: Default::default(), + heartbeat_max_time_since_last: Default::default(), + next_check_time: Instant::now(), + heartbeat_peer_reputation_config: Default::default(), + cached_view: Arc::new(CachedView::new(100, false)), + }; + let mut watcher = StateWatcher::started(); + // End of initialization + + for _ in 0..100 { + // When + task.run(&mut watcher).await; + + // Then + block_processed_receiver + .try_recv() + .expect("Should process the block height even under p2p pressure"); + } +} + +fn arb_tx_confirmation_gossip_message() -> FuelP2PEvent { + let peer_id = PeerId::random(); + let message_id = vec![1, 2, 3, 4, 5].into(); + let topic_hash = TopicHash::from_raw(TX_CONFIRMATIONS_GOSSIP_TOPIC); + let confirmations = TxConfirmations::default_test_tx(); + let message = GossipsubMessage::Confirmations(confirmations); + FuelP2PEvent::GossipsubMessage { + peer_id, + message_id, + topic_hash, + message, + } +} + +#[tokio::test] +async fn run__gossip_message_from_p2p_service_is_broadcasted__tx_confirmations() { + // given + let gossip_message_event = arb_tx_confirmation_gossip_message(); + let events = vec![gossip_message_event.clone()]; + let event_stream = futures::stream::iter(events); + let p2p_service = FakeP2PService { + peer_info: vec![], + next_event_stream: Box::pin(event_stream), + }; + let (confirmations_sender, mut confirmations_receiver) = mpsc::channel(100); + let broadcast = FakeBroadcast { + peer_reports: mpsc::channel(100).0, + confirmation_gossip_broadcast: confirmations_sender, + }; + let (request_sender, request_receiver) = mpsc::channel(100); + let mut task = Task { + chain_id: Default::default(), + response_timeout: Default::default(), + p2p_service, + view_provider: FakeDB, + next_block_height: FakeBlockImporter.next_block_height(), + tx_pool: FakeTxPool, + request_receiver, + request_sender, + 
db_heavy_task_processor: SyncProcessor::new("Test", 1, 1).unwrap(), + tx_pool_heavy_task_processor: AsyncProcessor::new("Test", 1, 1).unwrap(), + broadcast, + max_headers_per_request: 0, + max_txs_per_request: 100, + heartbeat_check_interval: Duration::from_secs(0), + heartbeat_max_avg_interval: Default::default(), + heartbeat_max_time_since_last: Default::default(), + next_check_time: Instant::now(), + heartbeat_peer_reputation_config: Default::default(), + cached_view: Arc::new(CachedView::new(100, false)), + }; + + // when + let mut watcher = StateWatcher::started(); + task.run(&mut watcher).await; + tokio::time::sleep(Duration::from_millis(100)).await; + + // then + let actual = confirmations_receiver.try_recv().unwrap().data.unwrap(); + let FuelP2PEvent::GossipsubMessage { message, .. } = gossip_message_event else { + panic!("Expected GossipsubMessage event"); + }; + let GossipsubMessage::Confirmations(expected) = message else { + panic!("Expected Confirmations message"); + }; + assert_eq!(expected, actual); +} From fb58c03f27ab6f3a7f7026bd9d6b511d43eef726 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 19 Feb 2025 15:18:06 -0700 Subject: [PATCH 08/20] Update CHANGELOG --- .changes/added/2726.md | 1 + 1 file changed, 1 insertion(+) create mode 100644 .changes/added/2726.md diff --git a/.changes/added/2726.md b/.changes/added/2726.md new file mode 100644 index 00000000000..a5afc3ba528 --- /dev/null +++ b/.changes/added/2726.md @@ -0,0 +1 @@ +Add a new gossip-sub message for transaction confirmations \ No newline at end of file From 4f35d552b2b80f0de8b09229a73ebbb5198fee51 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 19 Feb 2025 23:25:38 -0700 Subject: [PATCH 09/20] Update the types to include key delegation and proper signatures --- crates/services/p2p/src/p2p_service/tests.rs | 12 +- .../p2p/src/service/broadcast_tests.rs | 2 +- crates/services/p2p/src/service/task_tests.rs | 2 +- crates/types/src/services/p2p.rs | 111 +++++++++++++++--- 4 
files changed, 100 insertions(+), 27 deletions(-) diff --git a/crates/services/p2p/src/p2p_service/tests.rs b/crates/services/p2p/src/p2p_service/tests.rs index af6f10ddd4e..abf4d1faaf8 100644 --- a/crates/services/p2p/src/p2p_service/tests.rs +++ b/crates/services/p2p/src/p2p_service/tests.rs @@ -587,10 +587,6 @@ async fn peer_info_updates_work() { #[tokio::test] #[instrument] async fn gossipsub_broadcast_tx_with_accept__new_tx() { - tracing_subscriber::fmt() - .with_max_level(tracing::Level::INFO) - .with_target(false) - .init(); for _ in 0..100 { tokio::time::timeout( Duration::from_secs(5), @@ -610,16 +606,12 @@ async fn gossipsub_broadcast_tx_with_accept__new_tx() { #[tokio::test] #[instrument] async fn gossipsub_broadcast_tx_with_accept__tx_confirmations() { - tracing_subscriber::fmt() - .with_max_level(tracing::Level::INFO) - .with_target(false) - .init(); for _ in 0..100 { tokio::time::timeout( Duration::from_secs(20), gossipsub_broadcast( GossipsubBroadcastRequest::Confirmations(Arc::new( - TxConfirmations::default_test_tx(), + TxConfirmations::default_test_confirmation(), )), GossipsubMessageAcceptance::Accept, None, @@ -657,7 +649,7 @@ async fn gossipsub_broadcast_tx_with_reject__tx_confirmations() { Duration::from_secs(5), gossipsub_broadcast( GossipsubBroadcastRequest::Confirmations(Arc::new( - TxConfirmations::default_test_tx(), + TxConfirmations::default_test_confirmation(), )), GossipsubMessageAcceptance::Reject, None, diff --git a/crates/services/p2p/src/service/broadcast_tests.rs b/crates/services/p2p/src/service/broadcast_tests.rs index 77ad54503d6..65efb470915 100644 --- a/crates/services/p2p/src/service/broadcast_tests.rs +++ b/crates/services/p2p/src/service/broadcast_tests.rs @@ -13,7 +13,7 @@ fn arb_shared_state() -> SharedState { async fn shared_state__broadcast__tx_confirmations() { // given let broadcast = arb_shared_state(); - let confirmations = TxConfirmations::default_test_tx(); + let confirmations = 
TxConfirmations::default_test_confirmation(); let confirmations_gossip_data = ConfirmationsGossipData { data: Some(confirmations.clone()), peer_id: FuelPeerId::from(PeerId::random().to_bytes().to_vec()), diff --git a/crates/services/p2p/src/service/task_tests.rs b/crates/services/p2p/src/service/task_tests.rs index 1821975d334..f65e1b001e9 100644 --- a/crates/services/p2p/src/service/task_tests.rs +++ b/crates/services/p2p/src/service/task_tests.rs @@ -506,7 +506,7 @@ fn arb_tx_confirmation_gossip_message() -> FuelP2PEvent { let peer_id = PeerId::random(); let message_id = vec![1, 2, 3, 4, 5].into(); let topic_hash = TopicHash::from_raw(TX_CONFIRMATIONS_GOSSIP_TOPIC); - let confirmations = TxConfirmations::default_test_tx(); + let confirmations = TxConfirmations::default_test_confirmation(); let message = GossipsubMessage::Confirmations(confirmations); FuelP2PEvent::GossipsubMessage { peer_id, diff --git a/crates/types/src/services/p2p.rs b/crates/types/src/services/p2p.rs index 041ee754b42..2c459d6abbe 100644 --- a/crates/types/src/services/p2p.rs +++ b/crates/types/src/services/p2p.rs @@ -6,8 +6,18 @@ use serde::{ Serialize, }; +use super::txpool::ArcPoolTx; +#[cfg(feature = "serde")] +use super::txpool::PoolTransaction; use crate::{ - fuel_tx::Transaction, + fuel_crypto::{ + PublicKey, + Signature, + }, + fuel_tx::{ + Transaction, + TxId, + }, fuel_types::BlockHeight, }; use std::{ @@ -20,11 +30,7 @@ use std::{ str::FromStr, time::SystemTime, }; - -use super::txpool::ArcPoolTx; - -#[cfg(feature = "serde")] -use super::txpool::PoolTransaction; +use tai64::Tai64; /// Contains types and logic for Peer Reputation pub mod peer_reputation; @@ -75,21 +81,96 @@ pub type TransactionGossipData = GossipData; /// Transactions that have been confirmed by block producer pub type ConfirmationsGossipData = GossipData; -/// List of transactions that have been confirmed with block producer's signature +/// A value and an associated signature +#[derive(Serialize, Deserialize, 
Debug, Clone, PartialEq, Eq)] +pub struct Sealed { + /// The actual value + pub entity: Entity, + /// Seal + pub signature: Signature, +} + +/// A key that will be used to sign a pre-confirmations #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] -pub struct TxConfirmations { - signature: String, - txs: Vec, +pub struct DelegatePreConfirmationKey { + /// The public key of the person who is allowed to create pre-confirmations. + public_key: PublicKey, + /// The time at which the key will expire. Used to indicate to the recipient which key + /// to use to verify the pre-confirmations--serves the second purpose of being a nonce of + /// each key + expiration: Tai64, +} + +/// A pre-confirmation is a message that is sent by the block producer to give the _final_ +/// status of a transaction +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] +pub struct Preconfirmation { + /// The ID of the transaction that is being pre-confirmed + tx_id: TxId, + /// The status of the transaction that is being pre-confirmed + status: PreconfirmationStatus, +} + +/// Status of a transaction that has been pre-confirmed by block producer +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] +pub enum PreconfirmationStatus { + /// Transaction was squeezed out by the tx pool + SqueezedOutByBlockProducer { + /// Reason the transaction was squeezed out + reason: String, + }, + /// Transaction has been confirmed and will be included in block_height + SuccessByBlockProducer { + /// The block height at which the transaction will be included + block_height: BlockHeight, + }, + /// Transaction will not be included in a block, rejected at `block_height` + FailureByBlockProducer { + /// The block height at which the transaction will be rejected + block_height: BlockHeight, + }, +} + +/// A collection of pre-confirmations that have been signed by a delegate +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] +pub struct Preconfirmations { + /// The 
expiration time of the key used to sign + expiration: Tai64, + /// The transactions which have been pre-confirmed + preconfirmations: Vec, +} + +/// A signed key delegation +pub type SignedByBlockProducerDelegation = Sealed; + +/// A signed pre-confirmation +pub type SignedPreconfirmationByDelegate = Sealed; + +/// The possible messages sent by the parties pre-confirming transactinos +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] +pub enum TxConfirmations { + /// Notification of key delegation + Delegate(SignedByBlockProducerDelegation), + /// Notification of pre-confirmations + Preconfirmations(SignedPreconfirmationByDelegate), } #[cfg(feature = "test-helpers")] impl TxConfirmations { /// Test helper for creating arbitrary, meaningless `TxConfirmations` data - pub fn default_test_tx() -> Self { - Self { - signature: "Not a real signature".to_string(), - txs: vec![Transaction::default_test_tx()], - } + pub fn default_test_confirmation() -> Self { + Self::Preconfirmations(SignedPreconfirmationByDelegate { + entity: Preconfirmations { + expiration: Tai64::UNIX_EPOCH, + preconfirmations: vec![Preconfirmation { + tx_id: TxId::default(), + status: PreconfirmationStatus::SuccessByBlockProducer { + block_height: BlockHeight::new(0), + }, + }], + }, + signature: Signature::default(), + }) } } From 143804d84c0de9ecf449cc46cc2f0142b5d46080 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 19 Feb 2025 23:26:58 -0700 Subject: [PATCH 10/20] Rename type --- crates/services/p2p/src/gossipsub/messages.rs | 6 +++--- crates/services/p2p/src/p2p_service/tests.rs | 6 +++--- crates/services/p2p/src/service/broadcast_tests.rs | 4 ++-- crates/services/p2p/src/service/task_tests.rs | 4 ++-- crates/types/src/services/p2p.rs | 6 +++--- 5 files changed, 13 insertions(+), 13 deletions(-) diff --git a/crates/services/p2p/src/gossipsub/messages.rs b/crates/services/p2p/src/gossipsub/messages.rs index 32175e93ce4..403aef284fa 100644 --- 
a/crates/services/p2p/src/gossipsub/messages.rs +++ b/crates/services/p2p/src/gossipsub/messages.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use fuel_core_types::fuel_tx::Transaction; -use fuel_core_types::services::p2p::TxConfirmations; +use fuel_core_types::services::p2p::PreConfirmationMessage; use serde::{ Deserialize, Serialize, @@ -22,11 +22,11 @@ pub enum GossipTopicTag { #[derive(Debug, Clone)] pub enum GossipsubBroadcastRequest { NewTx(Arc), - Confirmations(Arc), + Confirmations(Arc), } #[derive(Serialize, Deserialize, Debug, Clone)] pub enum GossipsubMessage { NewTx(Transaction), - Confirmations(TxConfirmations), + Confirmations(PreConfirmationMessage), } diff --git a/crates/services/p2p/src/p2p_service/tests.rs b/crates/services/p2p/src/p2p_service/tests.rs index abf4d1faaf8..5ad564ef916 100644 --- a/crates/services/p2p/src/p2p_service/tests.rs +++ b/crates/services/p2p/src/p2p_service/tests.rs @@ -48,8 +48,8 @@ use fuel_core_types::{ services::p2p::{ GossipsubMessageAcceptance, NetworkableTransactionPool, + PreConfirmationMessage, Transactions, - TxConfirmations, }, }; use futures::{ @@ -611,7 +611,7 @@ async fn gossipsub_broadcast_tx_with_accept__tx_confirmations() { Duration::from_secs(20), gossipsub_broadcast( GossipsubBroadcastRequest::Confirmations(Arc::new( - TxConfirmations::default_test_confirmation(), + PreConfirmationMessage::default_test_confirmation(), )), GossipsubMessageAcceptance::Accept, None, @@ -649,7 +649,7 @@ async fn gossipsub_broadcast_tx_with_reject__tx_confirmations() { Duration::from_secs(5), gossipsub_broadcast( GossipsubBroadcastRequest::Confirmations(Arc::new( - TxConfirmations::default_test_confirmation(), + PreConfirmationMessage::default_test_confirmation(), )), GossipsubMessageAcceptance::Reject, None, diff --git a/crates/services/p2p/src/service/broadcast_tests.rs b/crates/services/p2p/src/service/broadcast_tests.rs index 65efb470915..beae827c918 100644 --- a/crates/services/p2p/src/service/broadcast_tests.rs +++ 
b/crates/services/p2p/src/service/broadcast_tests.rs @@ -1,7 +1,7 @@ #![allow(non_snake_case)] use super::*; -use fuel_core_types::services::p2p::TxConfirmations; +use fuel_core_types::services::p2p::PreConfirmationMessage; fn arb_shared_state() -> SharedState { let config = Config::default("test network"); @@ -13,7 +13,7 @@ fn arb_shared_state() -> SharedState { async fn shared_state__broadcast__tx_confirmations() { // given let broadcast = arb_shared_state(); - let confirmations = TxConfirmations::default_test_confirmation(); + let confirmations = PreConfirmationMessage::default_test_confirmation(); let confirmations_gossip_data = ConfirmationsGossipData { data: Some(confirmations.clone()), peer_id: FuelPeerId::from(PeerId::random().to_bytes().to_vec()), diff --git a/crates/services/p2p/src/service/task_tests.rs b/crates/services/p2p/src/service/task_tests.rs index f65e1b001e9..7ed9021c5d8 100644 --- a/crates/services/p2p/src/service/task_tests.rs +++ b/crates/services/p2p/src/service/task_tests.rs @@ -15,7 +15,7 @@ use fuel_core_storage::Result as StorageResult; use fuel_core_types::{ blockchain::consensus::Genesis, fuel_types::BlockHeight, - services::p2p::TxConfirmations, + services::p2p::PreConfirmationMessage, }; use futures::FutureExt; use libp2p::gossipsub::TopicHash; @@ -506,7 +506,7 @@ fn arb_tx_confirmation_gossip_message() -> FuelP2PEvent { let peer_id = PeerId::random(); let message_id = vec![1, 2, 3, 4, 5].into(); let topic_hash = TopicHash::from_raw(TX_CONFIRMATIONS_GOSSIP_TOPIC); - let confirmations = TxConfirmations::default_test_confirmation(); + let confirmations = PreConfirmationMessage::default_test_confirmation(); let message = GossipsubMessage::Confirmations(confirmations); FuelP2PEvent::GossipsubMessage { peer_id, diff --git a/crates/types/src/services/p2p.rs b/crates/types/src/services/p2p.rs index 2c459d6abbe..6f5ee411725 100644 --- a/crates/types/src/services/p2p.rs +++ b/crates/types/src/services/p2p.rs @@ -79,7 +79,7 @@ pub struct 
GossipData { pub type TransactionGossipData = GossipData; /// Transactions that have been confirmed by block producer -pub type ConfirmationsGossipData = GossipData; +pub type ConfirmationsGossipData = GossipData; /// A value and an associated signature #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] @@ -148,7 +148,7 @@ pub type SignedPreconfirmationByDelegate = Sealed; /// The possible messages sent by the parties pre-confirming transactinos #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] -pub enum TxConfirmations { +pub enum PreConfirmationMessage { /// Notification of key delegation Delegate(SignedByBlockProducerDelegation), /// Notification of pre-confirmations @@ -156,7 +156,7 @@ pub enum TxConfirmations { } #[cfg(feature = "test-helpers")] -impl TxConfirmations { +impl PreConfirmationMessage { /// Test helper for creating arbitrary, meaningless `TxConfirmations` data pub fn default_test_confirmation() -> Self { Self::Preconfirmations(SignedPreconfirmationByDelegate { From c513c2599f5e1b60785373dbfa1c2f8f9be6f2cb Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Wed, 19 Feb 2025 23:45:24 -0700 Subject: [PATCH 11/20] Fix serde feature stuff --- crates/types/src/services/p2p.rs | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/crates/types/src/services/p2p.rs b/crates/types/src/services/p2p.rs index 6f5ee411725..6c662e34021 100644 --- a/crates/types/src/services/p2p.rs +++ b/crates/types/src/services/p2p.rs @@ -82,7 +82,8 @@ pub type TransactionGossipData = GossipData; pub type ConfirmationsGossipData = GossipData; /// A value and an associated signature -#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct Sealed { /// The actual value pub entity: Entity, @@ -91,7 +92,8 @@ pub struct Sealed { } /// A key that will be used to sign a pre-confirmations 
-#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct DelegatePreConfirmationKey { /// The public key of the person who is allowed to create pre-confirmations. public_key: PublicKey, @@ -103,7 +105,8 @@ pub struct DelegatePreConfirmationKey { /// A pre-confirmation is a message that is sent by the block producer to give the _final_ /// status of a transaction -#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct Preconfirmation { /// The ID of the transaction that is being pre-confirmed tx_id: TxId, @@ -112,7 +115,8 @@ pub struct Preconfirmation { } /// Status of a transaction that has been pre-confirmed by block producer -#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub enum PreconfirmationStatus { /// Transaction was squeezed out by the tx pool SqueezedOutByBlockProducer { @@ -132,7 +136,8 @@ pub enum PreconfirmationStatus { } /// A collection of pre-confirmations that have been signed by a delegate -#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct Preconfirmations { /// The expiration time of the key used to sign expiration: Tai64, @@ -147,7 +152,8 @@ pub type SignedByBlockProducerDelegation = Sealed; pub type SignedPreconfirmationByDelegate = Sealed; /// The possible messages sent by the parties pre-confirming transactinos -#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[derive(Debug, Clone, PartialEq, Eq)] pub enum 
PreConfirmationMessage { /// Notification of key delegation Delegate(SignedByBlockProducerDelegation), From e5d041e4b92f45d2115b7f58ec764147f847a27a Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Thu, 20 Feb 2025 10:47:26 -0700 Subject: [PATCH 12/20] More name changes --- crates/services/p2p/src/codecs/postcard.rs | 6 +++--- crates/services/p2p/src/gossipsub/messages.rs | 6 +++--- crates/services/p2p/src/gossipsub/topics.rs | 4 ++-- crates/services/p2p/src/p2p_service/tests.rs | 12 ++++++------ crates/services/p2p/src/service.rs | 2 +- crates/services/p2p/src/service/task_tests.rs | 4 ++-- 6 files changed, 17 insertions(+), 17 deletions(-) diff --git a/crates/services/p2p/src/codecs/postcard.rs b/crates/services/p2p/src/codecs/postcard.rs index 2ce67e3e2c6..3484d944ee8 100644 --- a/crates/services/p2p/src/codecs/postcard.rs +++ b/crates/services/p2p/src/codecs/postcard.rs @@ -158,7 +158,7 @@ impl GossipsubCodec for PostcardCodec { fn encode(&self, data: Self::RequestMessage) -> Result, io::Error> { let encoded_data = match data { GossipsubBroadcastRequest::NewTx(tx) => postcard::to_stdvec(&*tx), - GossipsubBroadcastRequest::Confirmations(confirmations) => { + GossipsubBroadcastRequest::TxPreConfirmations(confirmations) => { postcard::to_stdvec(&*confirmations) } }; @@ -173,8 +173,8 @@ impl GossipsubCodec for PostcardCodec { ) -> Result { let decoded_response = match gossipsub_tag { GossipTopicTag::NewTx => GossipsubMessage::NewTx(deserialize(encoded_data)?), - GossipTopicTag::TxConfirmations => { - GossipsubMessage::Confirmations(deserialize(encoded_data)?) + GossipTopicTag::TxPreConfirmations => { + GossipsubMessage::TxPreConfirmations(deserialize(encoded_data)?) 
} }; diff --git a/crates/services/p2p/src/gossipsub/messages.rs b/crates/services/p2p/src/gossipsub/messages.rs index 403aef284fa..c6f93d8eed0 100644 --- a/crates/services/p2p/src/gossipsub/messages.rs +++ b/crates/services/p2p/src/gossipsub/messages.rs @@ -13,7 +13,7 @@ use serde::{ #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub enum GossipTopicTag { NewTx, - TxConfirmations, + TxPreConfirmations, } /// Takes `Arc` and wraps it in a matching GossipsubBroadcastRequest @@ -22,11 +22,11 @@ pub enum GossipTopicTag { #[derive(Debug, Clone)] pub enum GossipsubBroadcastRequest { NewTx(Arc), - Confirmations(Arc), + TxPreConfirmations(Arc), } #[derive(Serialize, Deserialize, Debug, Clone)] pub enum GossipsubMessage { NewTx(Transaction), - Confirmations(PreConfirmationMessage), + TxPreConfirmations(PreConfirmationMessage), } diff --git a/crates/services/p2p/src/gossipsub/topics.rs b/crates/services/p2p/src/gossipsub/topics.rs index fa726768bbe..7ee2b1da62a 100644 --- a/crates/services/p2p/src/gossipsub/topics.rs +++ b/crates/services/p2p/src/gossipsub/topics.rs @@ -42,7 +42,7 @@ impl GossipsubTopics { match incoming_topic { hash if hash == &self.new_tx_topic => Some(GossipTopicTag::NewTx), hash if hash == &self.tx_confirmations_topic => { - Some(GossipTopicTag::TxConfirmations) + Some(GossipTopicTag::TxPreConfirmations) } _ => None, } @@ -56,7 +56,7 @@ impl GossipsubTopics { ) -> TopicHash { match outgoing_request { GossipsubBroadcastRequest::NewTx(_) => self.new_tx_topic.clone(), - GossipsubBroadcastRequest::Confirmations(_) => { + GossipsubBroadcastRequest::TxPreConfirmations(_) => { self.tx_confirmations_topic.clone() } } diff --git a/crates/services/p2p/src/p2p_service/tests.rs b/crates/services/p2p/src/p2p_service/tests.rs index 5ad564ef916..8fc68d365c0 100644 --- a/crates/services/p2p/src/p2p_service/tests.rs +++ b/crates/services/p2p/src/p2p_service/tests.rs @@ -610,7 +610,7 @@ async fn gossipsub_broadcast_tx_with_accept__tx_confirmations() { tokio::time::timeout( 
Duration::from_secs(20), gossipsub_broadcast( - GossipsubBroadcastRequest::Confirmations(Arc::new( + GossipsubBroadcastRequest::TxPreConfirmations(Arc::new( PreConfirmationMessage::default_test_confirmation(), )), GossipsubMessageAcceptance::Accept, @@ -648,7 +648,7 @@ async fn gossipsub_broadcast_tx_with_reject__tx_confirmations() { tokio::time::timeout( Duration::from_secs(5), gossipsub_broadcast( - GossipsubBroadcastRequest::Confirmations(Arc::new( + GossipsubBroadcastRequest::TxPreConfirmations(Arc::new( PreConfirmationMessage::default_test_confirmation(), )), GossipsubMessageAcceptance::Reject, @@ -797,9 +797,9 @@ async fn gossipsub_broadcast( GossipsubBroadcastRequest::NewTx(_) => { (NEW_TX_GOSSIP_TOPIC, GossipTopicTag::NewTx) } - GossipsubBroadcastRequest::Confirmations(_) => ( + GossipsubBroadcastRequest::TxPreConfirmations(_) => ( TX_CONFIRMATIONS_GOSSIP_TOPIC, - GossipTopicTag::TxConfirmations, + GossipTopicTag::TxPreConfirmations, ), }; @@ -920,8 +920,8 @@ fn check_message_matches_request( assert_eq!(requested.deref(), received, "Both messages were `NewTx`s, but the received message did not match the requested message"); } ( - GossipsubMessage::Confirmations(received), - GossipsubBroadcastRequest::Confirmations(requested), + GossipsubMessage::TxPreConfirmations(received), + GossipsubBroadcastRequest::TxPreConfirmations(requested), ) => assert_eq!(requested.deref(), received, "Both messages were `Confirmations`, but the received message did not match the requested message"), _ => panic!("Message does not match the expected request, expected: {:?}, actual: {:?}", expected, message), } diff --git a/crates/services/p2p/src/service.rs b/crates/services/p2p/src/service.rs index 6d88dfc3aba..bc2794033af 100644 --- a/crates/services/p2p/src/service.rs +++ b/crates/services/p2p/src/service.rs @@ -475,7 +475,7 @@ impl Task { let next_transaction = GossipData::new(transaction, peer_id, message_id); let _ = self.broadcast.tx_broadcast(next_transaction); } - 
GossipsubMessage::Confirmations(confirmations) => { + GossipsubMessage::TxPreConfirmations(confirmations) => { let data = GossipData::new(confirmations, peer_id, message_id); let _ = self.broadcast.confirmations_broadcast(data); } diff --git a/crates/services/p2p/src/service/task_tests.rs b/crates/services/p2p/src/service/task_tests.rs index 7ed9021c5d8..a15280d6c89 100644 --- a/crates/services/p2p/src/service/task_tests.rs +++ b/crates/services/p2p/src/service/task_tests.rs @@ -507,7 +507,7 @@ fn arb_tx_confirmation_gossip_message() -> FuelP2PEvent { let message_id = vec![1, 2, 3, 4, 5].into(); let topic_hash = TopicHash::from_raw(TX_CONFIRMATIONS_GOSSIP_TOPIC); let confirmations = PreConfirmationMessage::default_test_confirmation(); - let message = GossipsubMessage::Confirmations(confirmations); + let message = GossipsubMessage::TxPreConfirmations(confirmations); FuelP2PEvent::GossipsubMessage { peer_id, message_id, @@ -564,7 +564,7 @@ async fn run__gossip_message_from_p2p_service_is_broadcasted__tx_confirmations() let FuelP2PEvent::GossipsubMessage { message, .. 
} = gossip_message_event else { panic!("Expected GossipsubMessage event"); }; - let GossipsubMessage::Confirmations(expected) = message else { + let GossipsubMessage::TxPreConfirmations(expected) = message else { panic!("Expected Confirmations message"); }; assert_eq!(expected, actual); From c78700af20b451e70bafc45055bd8be6f0cb83ac Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Thu, 20 Feb 2025 14:34:51 -0700 Subject: [PATCH 13/20] Add boolean cli arg to control if subscribed to pre-confirmations --- bin/fuel-core/src/cli/run/p2p.rs | 5 +++++ crates/services/p2p/src/config.rs | 5 +++++ crates/services/p2p/src/gossipsub/config.rs | 11 +++++------ crates/services/p2p/src/p2p_service/tests.rs | 2 ++ 4 files changed, 17 insertions(+), 6 deletions(-) diff --git a/bin/fuel-core/src/cli/run/p2p.rs b/bin/fuel-core/src/cli/run/p2p.rs index f9e372c65d9..a4f8ebac4db 100644 --- a/bin/fuel-core/src/cli/run/p2p.rs +++ b/bin/fuel-core/src/cli/run/p2p.rs @@ -197,6 +197,10 @@ pub struct P2PArgs { /// Number of threads to read from the tx pool. #[clap(long = "p2p-txpool-threads", default_value = "0", env)] pub tx_pool_threads: usize, + + /// Subscribe to pre-confirmation gossip topic + #[clap(long = "subscribe-to-pre-confirmations", env)] + subscribe_to_pre_confirmations: bool, } #[derive(Debug, Clone, Args)] @@ -355,6 +359,7 @@ impl P2PArgs { database_read_threads: self.database_read_threads, tx_pool_threads: self.tx_pool_threads, state: NotInitialized, + subscribe_to_pre_confirmations: self.subscribe_to_pre_confirmations, }; Ok(Some(config)) } diff --git a/crates/services/p2p/src/config.rs b/crates/services/p2p/src/config.rs index 5603b357402..0540024e3d9 100644 --- a/crates/services/p2p/src/config.rs +++ b/crates/services/p2p/src/config.rs @@ -140,6 +140,9 @@ pub struct Config { /// with the `NotInitialized` state. But it can be set into the `Initialized` state only with /// the `init` method. 
pub state: State, + + /// If true, the node will subscribe to pre-confirmations topic + pub subscribe_to_pre_confirmations: bool, } /// The initialized state can be achieved only by the `init` function because `()` is private. @@ -189,6 +192,7 @@ impl Config { database_read_threads: self.database_read_threads, tx_pool_threads: self.tx_pool_threads, state: Initialized(()), + subscribe_to_pre_confirmations: self.subscribe_to_pre_confirmations, }) } } @@ -242,6 +246,7 @@ impl Config { database_read_threads: 0, tx_pool_threads: 0, state: NotInitialized, + subscribe_to_pre_confirmations: false, } } } diff --git a/crates/services/p2p/src/gossipsub/config.rs b/crates/services/p2p/src/gossipsub/config.rs index fd7a7b7fc86..69314ff0065 100644 --- a/crates/services/p2p/src/gossipsub/config.rs +++ b/crates/services/p2p/src/gossipsub/config.rs @@ -227,14 +227,13 @@ fn initialize_gossipsub(gossipsub: &mut gossipsub::Behaviour, p2p_config: &Confi .with_peer_score(peer_score_params, peer_score_thresholds) .expect("gossipsub initialized with peer score"); - // TODO: Make topics configurable. 
- let topics = vec![ - (NEW_TX_GOSSIP_TOPIC, NEW_TX_GOSSIP_WEIGHT), - ( + let mut topics = vec![(NEW_TX_GOSSIP_TOPIC, NEW_TX_GOSSIP_WEIGHT)]; + if p2p_config.subscribe_to_pre_confirmations { + topics.push(( TX_CONFIRMATIONS_GOSSIP_TOPIC, TX_CONFIRMATIONS_GOSSIP_WEIGHT, - ), - ]; + )); + } // subscribe to gossipsub topics with the network name suffix for (topic, weight) in topics { diff --git a/crates/services/p2p/src/p2p_service/tests.rs b/crates/services/p2p/src/p2p_service/tests.rs index 8fc68d365c0..0dcacf7898e 100644 --- a/crates/services/p2p/src/p2p_service/tests.rs +++ b/crates/services/p2p/src/p2p_service/tests.rs @@ -792,6 +792,8 @@ async fn gossipsub_broadcast( p2p_config.max_gossipsub_peers_connected = connection_limit; } + p2p_config.subscribe_to_pre_confirmations = true; + let (selected_topic, selected_tag): (Sha256Topic, GossipTopicTag) = { let (topic, tag) = match broadcast_request { GossipsubBroadcastRequest::NewTx(_) => { From 94c4ae2eb6980afe70730e6884fb15c095081e57 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Thu, 20 Feb 2025 15:02:56 -0700 Subject: [PATCH 14/20] Complete merge --- crates/services/p2p/src/codecs/gossipsub.rs | 26 +- crates/services/p2p/src/codecs/postcard.rs | 93 +- crates/services/p2p/src/p2p_service.rs | 1332 ------------------ crates/services/p2p/src/p2p_service/tests.rs | 32 +- crates/services/p2p/src/service.rs | 499 ------- 5 files changed, 44 insertions(+), 1938 deletions(-) diff --git a/crates/services/p2p/src/codecs/gossipsub.rs b/crates/services/p2p/src/codecs/gossipsub.rs index 2dc7ce96bf1..fe44b2ef04b 100644 --- a/crates/services/p2p/src/codecs/gossipsub.rs +++ b/crates/services/p2p/src/codecs/gossipsub.rs @@ -1,12 +1,16 @@ -use std::io; - -use fuel_core_types::fuel_tx::Transaction; - use crate::gossipsub::messages::{ GossipTopicTag, GossipsubBroadcastRequest, GossipsubMessage, }; +use fuel_core_types::{ + fuel_tx::Transaction, + services::p2p::PreConfirmationMessage, +}; +use std::{ + io, + ops::Deref, +}; use 
super::{ Decode, @@ -22,8 +26,10 @@ pub struct GossipsubMessageHandler { impl GossipsubCodec for GossipsubMessageHandler where - Codec: - Encode + Decode, + Codec: Encode + + Decode + + Encode + + Decode, { type RequestMessage = GossipsubBroadcastRequest; type ResponseMessage = GossipsubMessage; @@ -31,7 +37,10 @@ where fn encode(&self, data: Self::RequestMessage) -> Result, io::Error> { match data { GossipsubBroadcastRequest::NewTx(tx) => { - Ok(self.codec.encode(&tx)?.into_bytes()) + Ok(self.codec.encode(tx.deref())?.into_bytes()) + } + GossipsubBroadcastRequest::TxPreConfirmations(msg) => { + Ok(self.codec.encode(msg.deref())?.into_bytes()) } } } @@ -45,6 +54,9 @@ where GossipTopicTag::NewTx => { GossipsubMessage::NewTx(self.codec.decode(encoded_data)?) } + GossipTopicTag::TxPreConfirmations => { + GossipsubMessage::TxPreConfirmations(self.codec.decode(encoded_data)?) + } }; Ok(decoded_response) diff --git a/crates/services/p2p/src/codecs/postcard.rs b/crates/services/p2p/src/codecs/postcard.rs index 56796bdf2b5..8d10e3a3cd0 100644 --- a/crates/services/p2p/src/codecs/postcard.rs +++ b/crates/services/p2p/src/codecs/postcard.rs @@ -39,7 +39,10 @@ impl Encode for PostcardCodec where T: ?Sized + serde::Serialize, { - type Encoder<'a> = Cow<'a, [u8]> where T: 'a; + type Encoder<'a> + = Cow<'a, [u8]> + where + T: 'a; type Error = io::Error; fn encode<'a>(&self, value: &'a T) -> Result, Self::Error> { @@ -49,94 +52,6 @@ where } } - async fn read_response( - &mut self, - protocol: &Self::Protocol, - socket: &mut T, - ) -> io::Result - where - T: AsyncRead + Unpin + Send, - { - let mut response = Vec::new(); - socket - .take(self.max_response_size as u64) - .read_to_end(&mut response) - .await?; - - match protocol { - PostcardProtocol::V1 => { - let v1_response = deserialize::(&response)?; - Ok(v1_response.into()) - } - PostcardProtocol::V2 => deserialize::(&response), - } - } - - async fn write_request( - &mut self, - _protocol: &Self::Protocol, - socket: &mut T, - 
req: Self::Request, - ) -> io::Result<()> - where - T: futures::AsyncWrite + Unpin + Send, - { - let encoded_data = serialize(&req)?; - socket.write_all(&encoded_data).await?; - Ok(()) - } - - async fn write_response( - &mut self, - protocol: &Self::Protocol, - socket: &mut T, - res: Self::Response, - ) -> io::Result<()> - where - T: futures::AsyncWrite + Unpin + Send, - { - let encoded_data = match protocol { - PostcardProtocol::V1 => { - let v1_response: V1ResponseMessage = res.into(); - serialize(&v1_response)? - } - PostcardProtocol::V2 => serialize(&res)?, - }; - socket.write_all(&encoded_data).await?; - Ok(()) - } -} - -impl GossipsubCodec for PostcardCodec { - type RequestMessage = GossipsubBroadcastRequest; - type ResponseMessage = GossipsubMessage; - - fn encode(&self, data: Self::RequestMessage) -> Result, io::Error> { - let encoded_data = match data { - GossipsubBroadcastRequest::NewTx(tx) => postcard::to_stdvec(&*tx), - GossipsubBroadcastRequest::TxPreConfirmations(confirmations) => { - postcard::to_stdvec(&*confirmations) - } - }; - - encoded_data.map_err(|e| io::Error::new(io::ErrorKind::Other, e.to_string())) - } - - fn decode( - &self, - encoded_data: &[u8], - gossipsub_tag: GossipTopicTag, - ) -> Result { - let decoded_response = match gossipsub_tag { - GossipTopicTag::NewTx => GossipsubMessage::NewTx(deserialize(encoded_data)?), - GossipTopicTag::TxPreConfirmations => { - GossipsubMessage::TxPreConfirmations(deserialize(encoded_data)?) 
- } - }; - - Ok(decoded_response) - } -} impl Decode for PostcardCodec where T: serde::de::DeserializeOwned, diff --git a/crates/services/p2p/src/p2p_service.rs b/crates/services/p2p/src/p2p_service.rs index 97b0d1fd621..638cdc7c46d 100644 --- a/crates/services/p2p/src/p2p_service.rs +++ b/crates/services/p2p/src/p2p_service.rs @@ -903,1335 +903,3 @@ impl FuelP2PService { }) } } - -#[allow(clippy::cast_possible_truncation)] -#[cfg(test)] -mod tests { - use super::{ - FuelP2PService, - PublishError, - }; - use crate::{ - codecs::{ - gossipsub::GossipsubMessageHandler, - request_response::RequestResponseMessageHandler, - }, - config::Config, - gossipsub::{ - messages::{ - GossipsubBroadcastRequest, - GossipsubMessage, - }, - topics::NEW_TX_GOSSIP_TOPIC, - }, - p2p_service::FuelP2PEvent, - peer_manager::PeerInfo, - request_response::messages::{ - RequestMessage, - ResponseError, - ResponseSender, - V2ResponseMessage, - }, - service::to_message_acceptance, - }; - use fuel_core_types::{ - blockchain::{ - consensus::{ - poa::PoAConsensus, - Consensus, - }, - header::BlockHeader, - SealedBlockHeader, - }, - fuel_tx::{ - Transaction, - TransactionBuilder, - TxId, - UniqueIdentifier, - }, - fuel_types::ChainId, - services::p2p::{ - GossipsubMessageAcceptance, - NetworkableTransactionPool, - Transactions, - }, - }; - use futures::{ - future::join_all, - StreamExt, - }; - use libp2p::{ - gossipsub::{ - Sha256Topic, - Topic, - }, - identity::Keypair, - swarm::{ - ListenError, - SwarmEvent, - }, - Multiaddr, - PeerId, - }; - use rand::Rng; - use std::{ - collections::HashSet, - ops::Range, - sync::Arc, - time::Duration, - }; - use tokio::sync::{ - broadcast, - mpsc, - oneshot, - watch, - }; - use tracing_attributes::instrument; - - type P2PService = FuelP2PService; - - /// helper function for building FuelP2PService - async fn build_service_from_config(mut p2p_config: Config) -> P2PService { - p2p_config.keypair = Keypair::generate_secp256k1(); // change keypair for each Node - 
let max_block_size = p2p_config.max_block_size; - let (sender, _) = - broadcast::channel(p2p_config.reserved_nodes.len().saturating_add(1)); - - let mut service = FuelP2PService::new( - sender, - p2p_config, - GossipsubMessageHandler::new(), - RequestResponseMessageHandler::new(max_block_size), - ) - .await - .unwrap(); - service.start().await.unwrap(); - service - } - - async fn setup_bootstrap_nodes( - p2p_config: &Config, - bootstrap_nodes_count: usize, - ) -> (Vec, Vec) { - let nodes = join_all( - (0..bootstrap_nodes_count) - .map(|_| build_service_from_config(p2p_config.clone())), - ) - .await; - let bootstrap_multiaddrs = nodes - .iter() - .flat_map(|b| b.multiaddrs()) - .collect::>(); - (nodes, bootstrap_multiaddrs) - } - - fn spawn(stop: &watch::Sender<()>, mut node: P2PService) { - let mut stop = stop.subscribe(); - tokio::spawn(async move { - loop { - tokio::select! { - _ = node.next_event() => {} - _ = stop.changed() => { - break; - } - } - } - }); - } - - #[tokio::test] - #[instrument] - async fn p2p_service_works() { - build_service_from_config(Config::default_initialized("p2p_service_works")).await; - } - - // Single sentry node connects to multiple reserved nodes and `max_peers_allowed` amount of non-reserved nodes. - // It also tries to dial extra non-reserved nodes to establish the connection. - // A single reserved node is not started immediately with the rest of the nodes. - // Once sentry node establishes the connection with the allowed number of nodes - // we start the reserved node, and await for it to establish the connection. - // This test proves that there is always an available slot for the reserved node to connect to. 
- #[tokio::test(flavor = "multi_thread")] - #[instrument] - async fn reserved_nodes_reconnect_works() { - let p2p_config = Config::default_initialized("reserved_nodes_reconnect_works"); - - // total amount will be `max_peers_allowed` + `reserved_nodes.len()` - let max_peers_allowed: usize = 3; - - let (bootstrap_nodes, bootstrap_multiaddrs) = - setup_bootstrap_nodes(&p2p_config, max_peers_allowed.saturating_mul(5)).await; - let (mut reserved_nodes, reserved_multiaddrs) = - setup_bootstrap_nodes(&p2p_config, max_peers_allowed).await; - - let mut sentry_node = { - let mut p2p_config = p2p_config.clone(); - p2p_config.max_discovery_peers_connected = max_peers_allowed as u32; - - p2p_config.bootstrap_nodes = bootstrap_multiaddrs; - - p2p_config.reserved_nodes = reserved_multiaddrs; - - build_service_from_config(p2p_config).await - }; - - // pop() a single reserved node, so it's not run with the rest of the nodes - let mut reserved_node = reserved_nodes.pop(); - let reserved_node_peer_id = reserved_node.as_ref().unwrap().local_peer_id; - - let all_node_services: Vec<_> = bootstrap_nodes - .into_iter() - .chain(reserved_nodes.into_iter()) - .collect(); - - let mut all_nodes_ids: Vec = all_node_services - .iter() - .map(|service| service.local_peer_id) - .collect(); - - let (stop_sender, _) = watch::channel(()); - all_node_services.into_iter().for_each(|node| { - spawn(&stop_sender, node); - }); - - loop { - tokio::select! 
{ - sentry_node_event = sentry_node.next_event() => { - // we've connected to all other peers - if sentry_node.peer_manager.total_peers_connected() > max_peers_allowed { - // if the `reserved_node` is not included, - // create and insert it, to be polled with rest of the nodes - if !all_nodes_ids - .iter() - .any(|local_peer_id| local_peer_id == &reserved_node_peer_id) { - if let Some(node) = reserved_node { - all_nodes_ids.push(node.local_peer_id); - spawn(&stop_sender, node); - reserved_node = None; - } - } - } - if let Some(FuelP2PEvent::PeerConnected(peer_id)) = sentry_node_event { - // we connected to the desired reserved node - if peer_id == reserved_node_peer_id { - break - } - } - }, - } - } - stop_sender.send(()).unwrap(); - } - - #[tokio::test] - #[instrument] - async fn dont_connect_to_node_with_same_peer_id() { - let mut p2p_config = - Config::default_initialized("dont_connect_to_node_with_same_peer_id"); - let mut node_a = build_service_from_config(p2p_config.clone()).await; - // We don't use build_service_from_config here, because we want to use the same keypair - // to have the same PeerId - let node_b = { - // Given - p2p_config.reserved_nodes = node_a.multiaddrs(); - let max_block_size = p2p_config.max_block_size; - let (sender, _) = - broadcast::channel(p2p_config.reserved_nodes.len().saturating_add(1)); - - let mut service = FuelP2PService::new( - sender, - p2p_config, - GossipsubMessageHandler::new(), - RequestResponseMessageHandler::new(max_block_size), - ) - .await - .unwrap(); - service.start().await.unwrap(); - service - }; - // When - tokio::time::timeout(Duration::from_secs(5), async move { - loop { - let event = node_a.next_event().await; - if let Some(FuelP2PEvent::PeerConnected(_)) = event { - panic!("Node B should not connect to Node A because they have the same PeerId"); - } - assert_eq!(node_a.peer_manager().total_peers_connected(), 0); - } - }) - .await - // Then - .expect_err("The node should not connect to itself"); - 
assert_eq!(node_b.peer_manager().total_peers_connected(), 0); - } - - // We start with two nodes, node_a and node_b, bootstrapped with `bootstrap_nodes_count` other nodes. - // Yet node_a and node_b are only allowed to connect to specified amount of nodes. - #[tokio::test] - #[instrument] - async fn max_peers_connected_works() { - let p2p_config = Config::default_initialized("max_peers_connected_works"); - - let bootstrap_nodes_count = 20; - let node_a_max_peers_allowed: usize = 3; - let node_b_max_peers_allowed: usize = 5; - - let (mut nodes, nodes_multiaddrs) = - setup_bootstrap_nodes(&p2p_config, bootstrap_nodes_count).await; - - // this node is allowed to only connect to `node_a_max_peers_allowed` other nodes - let mut node_a = { - let mut p2p_config = p2p_config.clone(); - p2p_config.max_discovery_peers_connected = node_a_max_peers_allowed as u32; - // it still tries to dial all nodes! - p2p_config.bootstrap_nodes.clone_from(&nodes_multiaddrs); - - build_service_from_config(p2p_config).await - }; - - // this node is allowed to only connect to `node_b_max_peers_allowed` other nodes - let mut node_b = { - let mut p2p_config = p2p_config.clone(); - p2p_config.max_discovery_peers_connected = node_b_max_peers_allowed as u32; - // it still tries to dial all nodes! - p2p_config.bootstrap_nodes.clone_from(&nodes_multiaddrs); - - build_service_from_config(p2p_config).await - }; - - let (tx, mut rx) = tokio::sync::oneshot::channel::<()>(); - let jh = tokio::spawn(async move { - while rx.try_recv().is_err() { - futures::stream::iter(nodes.iter_mut()) - .for_each_concurrent(4, |node| async move { - node.next_event().await; - }) - .await; - } - }); - - let mut node_a_hit_limit = false; - let mut node_b_hit_limit = false; - let mut instance = tokio::time::Instant::now(); - - // After we hit limit for node_a and node_b start timer. - // If we don't exceed the limit during 5 seconds, finish the test successfully. - while instance.elapsed().as_secs() < 5 { - tokio::select! 
{ - event_from_node_a = node_a.next_event() => { - if let Some(FuelP2PEvent::PeerConnected(_)) = event_from_node_a { - if node_a.peer_manager().total_peers_connected() > node_a_max_peers_allowed { - panic!("The node should only connect to max {node_a_max_peers_allowed} peers"); - } - node_a_hit_limit |= node_a.peer_manager().total_peers_connected() == node_a_max_peers_allowed; - } - tracing::info!("Event from the node_a: {:?}", event_from_node_a); - }, - event_from_node_b = node_b.next_event() => { - if let Some(FuelP2PEvent::PeerConnected(_)) = event_from_node_b { - if node_b.peer_manager().total_peers_connected() > node_b_max_peers_allowed { - panic!("The node should only connect to max {node_b_max_peers_allowed} peers"); - } - node_b_hit_limit |= node_b.peer_manager().total_peers_connected() == node_b_max_peers_allowed; - } - tracing::info!("Event from the node_b: {:?}", event_from_node_b); - }, - } - - if !(node_a_hit_limit && node_b_hit_limit) { - instance = tokio::time::Instant::now(); - } - } - - tx.send(()).unwrap(); - jh.await.unwrap() - } - - // Simulate 2 Sets of Sentry nodes. - // In both Sets, a single Guarded Node should only be connected to their sentry nodes. - // While other nodes can and should connect to nodes outside of the Sentry Set. 
- #[tokio::test(flavor = "multi_thread")] - #[instrument] - async fn sentry_nodes_working() { - const RESERVED_NODE_SIZE: usize = 4; - - let mut p2p_config = Config::default_initialized("sentry_nodes_working"); - - async fn build_sentry_nodes(p2p_config: Config) -> (P2PService, Vec) { - let (reserved_nodes, reserved_multiaddrs) = - setup_bootstrap_nodes(&p2p_config, RESERVED_NODE_SIZE).await; - - // set up the guraded node service with `reserved_nodes_only_mode` - let guarded_node_service = { - let mut p2p_config = p2p_config.clone(); - p2p_config.reserved_nodes = reserved_multiaddrs; - p2p_config.reserved_nodes_only_mode = true; - build_service_from_config(p2p_config).await - }; - - let sentry_nodes = reserved_nodes; - - (guarded_node_service, sentry_nodes) - } - - let (mut first_guarded_node, mut first_sentry_nodes) = - build_sentry_nodes(p2p_config.clone()).await; - p2p_config.bootstrap_nodes = first_sentry_nodes - .iter() - .flat_map(|n| n.multiaddrs()) - .collect(); - - let (mut second_guarded_node, second_sentry_nodes) = - build_sentry_nodes(p2p_config).await; - - let first_sentry_set: HashSet<_> = first_sentry_nodes - .iter() - .map(|node| node.local_peer_id) - .collect(); - - let second_sentry_set: HashSet<_> = second_sentry_nodes - .iter() - .map(|node| node.local_peer_id) - .collect(); - - let mut single_sentry_node = first_sentry_nodes.pop().unwrap(); - let mut sentry_node_connections = HashSet::new(); - let (stop_sender, _) = watch::channel(()); - first_sentry_nodes - .into_iter() - .chain(second_sentry_nodes.into_iter()) - .for_each(|node| { - spawn(&stop_sender, node); - }); - - let mut instance = tokio::time::Instant::now(); - // After guards are connected to all sentries and at least one sentry has - // more connections than sentries in the group, start the timer.. - // If guards don't connected to new nodes during 5 seconds, finish the test successfully. - while instance.elapsed().as_secs() < 5 { - tokio::select! 
{ - event_from_first_guarded = first_guarded_node.next_event() => { - if let Some(FuelP2PEvent::PeerConnected(peer_id)) = event_from_first_guarded { - if !first_sentry_set.contains(&peer_id) { - panic!("The node should only connect to the specified reserved nodes!"); - } - } - tracing::info!("Event from the first guarded node: {:?}", event_from_first_guarded); - }, - event_from_second_guarded = second_guarded_node.next_event() => { - if let Some(FuelP2PEvent::PeerConnected(peer_id)) = event_from_second_guarded { - if !second_sentry_set.contains(&peer_id) { - panic!("The node should only connect to the specified reserved nodes!"); - } - } - tracing::info!("Event from the second guarded node: {:?}", event_from_second_guarded); - }, - // Poll one of the reserved, sentry nodes - sentry_node_event = single_sentry_node.next_event() => { - if let Some(FuelP2PEvent::PeerConnected(peer_id)) = sentry_node_event { - sentry_node_connections.insert(peer_id); - } - } - }; - - // This reserved node has connected to more than the number of reserved nodes it is part of. - // It means it has discovered other nodes in the network. - if sentry_node_connections.len() < 2 * RESERVED_NODE_SIZE { - instance = tokio::time::Instant::now(); - } - } - stop_sender.send(()).unwrap(); - } - - // Simulates 2 p2p nodes that are on the same network and should connect via mDNS - // without any additional bootstrapping - #[tokio::test] - #[instrument] - async fn nodes_connected_via_mdns() { - // Node A - let mut p2p_config = Config::default_initialized("nodes_connected_via_mdns"); - p2p_config.enable_mdns = true; - let mut node_a = build_service_from_config(p2p_config.clone()).await; - - // Node B - let mut node_b = build_service_from_config(p2p_config).await; - - loop { - tokio::select! 
{ - node_b_event = node_b.next_event() => { - if let Some(FuelP2PEvent::PeerConnected(_)) = node_b_event { - // successfully connected to Node A - break - } - tracing::info!("Node B Event: {:?}", node_b_event); - }, - _ = node_a.swarm.select_next_some() => {}, - }; - } - } - - // Simulates 2 p2p nodes that are on the same network but their Fuel Upgrade checksum is different - // (different chain id or chain config) - // So they are not able to connect - #[tokio::test] - #[instrument] - async fn nodes_cannot_connect_due_to_different_checksum() { - use libp2p::TransportError; - // Node A - let mut p2p_config = - Config::default_initialized("nodes_cannot_connect_due_to_different_checksum"); - let mut node_a = build_service_from_config(p2p_config.clone()).await; - - // different checksum - p2p_config.checksum = [1u8; 32].into(); - p2p_config.bootstrap_nodes = node_a.multiaddrs(); - // Node B - let mut node_b = build_service_from_config(p2p_config).await; - - loop { - tokio::select! { - node_a_event = node_a.swarm.select_next_some() => { - tracing::info!("Node A Event: {:?}", node_a_event); - if let SwarmEvent::IncomingConnectionError { error: ListenError::Transport(TransportError::Other(_)), .. 
} = node_a_event { - break - } - }, - node_b_event = node_b.next_event() => { - if let Some(FuelP2PEvent::PeerConnected(_)) = node_b_event { - panic!("Node B should not connect to Node A!") - } - tracing::info!("Node B Event: {:?}", node_b_event); - }, - - }; - } - } - - // Simulates 3 p2p nodes, Node B & Node C are bootstrapped with Node A - // Using Identify Protocol Node C should be able to identify and connect to Node B - #[tokio::test] - #[instrument] - async fn nodes_connected_via_identify() { - // Node A - let mut p2p_config = Config::default_initialized("nodes_connected_via_identify"); - - let mut node_a = build_service_from_config(p2p_config.clone()).await; - - // Node B - p2p_config.bootstrap_nodes = node_a.multiaddrs(); - let mut node_b = build_service_from_config(p2p_config.clone()).await; - - // Node C - let mut node_c = build_service_from_config(p2p_config).await; - - loop { - tokio::select! { - node_a_event = node_a.next_event() => { - tracing::info!("Node A Event: {:?}", node_a_event); - }, - node_b_event = node_b.next_event() => { - tracing::info!("Node B Event: {:?}", node_b_event); - }, - - node_c_event = node_c.next_event() => { - if let Some(FuelP2PEvent::PeerConnected(peer_id)) = node_c_event { - // we have connected to Node B! 
- if peer_id == node_b.local_peer_id { - break - } - } - - tracing::info!("Node C Event: {:?}", node_c_event); - } - }; - } - } - - // Simulates 2 p2p nodes that connect to each other and consequently exchange Peer Info - // On successful connection, node B updates its latest BlockHeight - // and shares it with Peer A via Heartbeat protocol - #[tokio::test] - #[instrument] - async fn peer_info_updates_work() { - let mut p2p_config = Config::default_initialized("peer_info_updates_work"); - - // Node A - let mut node_a = build_service_from_config(p2p_config.clone()).await; - - // Node B - p2p_config.bootstrap_nodes = node_a.multiaddrs(); - let mut node_b = build_service_from_config(p2p_config).await; - - let latest_block_height = 40_u32.into(); - - loop { - tokio::select! { - node_a_event = node_a.next_event() => { - if let Some(FuelP2PEvent::PeerInfoUpdated { peer_id, block_height: _ }) = node_a_event { - if let Some(PeerInfo { heartbeat_data, client_version, .. }) = node_a.peer_manager.get_peer_info(&peer_id) { - // Exits after it verifies that: - // 1. Peer Addresses are known - // 2. Client Version is known - // 3. 
Node has responded with their latest BlockHeight - if client_version.is_some() && heartbeat_data.block_height == Some(latest_block_height) { - break; - } - } - } - - tracing::info!("Node A Event: {:?}", node_a_event); - }, - node_b_event = node_b.next_event() => { - if let Some(FuelP2PEvent::PeerConnected(_)) = node_b_event { - // we've connected to Peer A - // let's update our BlockHeight - node_b.update_block_height(latest_block_height); - } - - tracing::info!("Node B Event: {:?}", node_b_event); - } - } - } - } - - #[tokio::test] - #[instrument] - async fn gossipsub_broadcast_tx_with_accept() { - for _ in 0..100 { - tokio::time::timeout( - Duration::from_secs(5), - gossipsub_broadcast( - GossipsubBroadcastRequest::NewTx(Arc::new( - Transaction::default_test_tx(), - )), - GossipsubMessageAcceptance::Accept, - None, - ), - ) - .await - .unwrap(); - } - } - - #[tokio::test] - #[instrument] - async fn gossipsub_broadcast_tx_with_reject() { - for _ in 0..100 { - tokio::time::timeout( - Duration::from_secs(5), - gossipsub_broadcast( - GossipsubBroadcastRequest::NewTx(Arc::new( - Transaction::default_test_tx(), - )), - GossipsubMessageAcceptance::Reject, - None, - ), - ) - .await - .unwrap(); - } - } - - #[tokio::test] - #[instrument] - #[ignore] - async fn gossipsub_scoring_with_accepted_messages() { - gossipsub_scoring_tester( - "gossipsub_scoring_with_accepted_messages", - 100, - GossipsubMessageAcceptance::Accept, - ) - .await; - } - - /// At `GRAYLIST_THRESHOLD` the node will ignore all messages from the peer - /// And our PeerManager will ban the peer at that point - leading to disconnect - #[tokio::test] - #[instrument] - #[ignore] - async fn gossipsub_scoring_with_rejected_messages() { - gossipsub_scoring_tester( - "gossipsub_scoring_with_rejected_messages", - 100, - GossipsubMessageAcceptance::Reject, - ) - .await; - } - - // TODO: Move me before tests that use this function - /// Helper function for testing gossipsub scoring - /// ! 
Dev Note: this function runs forever, its purpose is to show the scoring in action with passage of time - async fn gossipsub_scoring_tester( - test_name: &str, - amount_of_msgs_per_second: usize, - acceptance: GossipsubMessageAcceptance, - ) { - let mut p2p_config = Config::default_initialized(test_name); - - // Node A - let mut node_a = build_service_from_config(p2p_config.clone()).await; - - // Node B - p2p_config.bootstrap_nodes = node_a.multiaddrs(); - let mut node_b = build_service_from_config(p2p_config.clone()).await; - - // Node C - p2p_config.bootstrap_nodes = node_b.multiaddrs(); - let mut node_c = build_service_from_config(p2p_config.clone()).await; - - let mut interval = tokio::time::interval(Duration::from_secs(1)); - - loop { - tokio::select! { - node_a_event = node_a.next_event() => { - if let Some(FuelP2PEvent::GossipsubMessage { message_id, peer_id, .. }) = node_a_event { - let msg_acceptance = to_message_acceptance(&acceptance); - node_a.report_message_validation_result(&message_id, peer_id, msg_acceptance); - } - } - node_b_event = node_b.next_event() => { - if let Some(FuelP2PEvent::GossipsubMessage { message_id, peer_id, .. }) = node_b_event { - let msg_acceptance = to_message_acceptance(&acceptance); - node_b.report_message_validation_result(&message_id, peer_id, msg_acceptance); - } - }, - node_c_event = node_c.next_event() => { - if let Some(FuelP2PEvent::GossipsubMessage { message_id, peer_id, .. 
}) = node_c_event { - let msg_acceptance = to_message_acceptance(&acceptance); - node_c.report_message_validation_result(&message_id, peer_id, msg_acceptance); - } - }, - _ = interval.tick() => { - let mut transactions = vec![]; - for _ in 0..amount_of_msgs_per_second { - let random_tx = - TransactionBuilder::script(rand::thread_rng().gen::<[u8; 32]>().to_vec(), rand::thread_rng().gen::<[u8; 32]>().to_vec()).finalize_as_transaction(); - - transactions.push(random_tx.clone()); - let random_tx = GossipsubBroadcastRequest::NewTx(Arc::new(random_tx)); - - match rand::thread_rng().gen_range(1..=3) { - 1 => { - // Node A sends a Transaction - let _ = node_a.publish_message(random_tx); - - }, - 2 => { - // Node B sends a Transaction - let _ = node_b.publish_message(random_tx); - - }, - 3 => { - // Node C sends a Transaction - let _ = node_c.publish_message(random_tx); - }, - _ => unreachable!("Random number generator is broken") - } - } - - eprintln!("Node A WORLD VIEW"); - eprintln!("B score: {:?}", node_a.get_peer_score(&node_b.local_peer_id).unwrap()); - eprintln!("C score: {:?}", node_a.get_peer_score(&node_c.local_peer_id).unwrap()); - eprintln!(); - - eprintln!("Node B WORLD VIEW"); - eprintln!("A score: {:?}", node_b.get_peer_score(&node_a.local_peer_id).unwrap()); - eprintln!("C score: {:?}", node_b.get_peer_score(&node_c.local_peer_id).unwrap()); - eprintln!(); - - eprintln!("Node C WORLD VIEW"); - eprintln!("A score: {:?}", node_c.get_peer_score(&node_a.local_peer_id).unwrap()); - eprintln!("B score: {:?}", node_c.get_peer_score(&node_b.local_peer_id).unwrap()); - eprintln!(); - - // never ending loop - // break; - } - } - } - } - - // TODO: Move me before tests that use this function - /// Reusable helper function for Broadcasting Gossipsub requests - async fn gossipsub_broadcast( - broadcast_request: GossipsubBroadcastRequest, - acceptance: GossipsubMessageAcceptance, - connection_limit: Option, - ) { - let mut p2p_config = 
Config::default_initialized("gossipsub_exchanges_messages"); - - if let Some(connection_limit) = connection_limit { - p2p_config.max_gossipsub_peers_connected = connection_limit; - } - - let selected_topic: Sha256Topic = { - let topic = match broadcast_request { - GossipsubBroadcastRequest::NewTx(_) => NEW_TX_GOSSIP_TOPIC, - }; - - Topic::new(format!("{}/{}", topic, p2p_config.network_name)) - }; - - let mut message_sent = false; - - // Node A - let mut node_a = build_service_from_config(p2p_config.clone()).await; - - // Node B - p2p_config.bootstrap_nodes = node_a.multiaddrs(); - let mut node_b = build_service_from_config(p2p_config.clone()).await; - - // Node C - p2p_config.bootstrap_nodes = node_b.multiaddrs(); - let mut node_c = build_service_from_config(p2p_config.clone()).await; - - // Node C does not connect to Node A - // it should receive the propagated message from Node B if `GossipsubMessageAcceptance` is `Accept` - node_c - .swarm - .behaviour_mut() - .block_peer(node_a.local_peer_id); - - let mut a_connected_to_b = false; - let mut b_connected_to_c = false; - loop { - // verifies that we've got at least a single peer address to send message to - if a_connected_to_b && b_connected_to_c && !message_sent { - message_sent = true; - let broadcast_request = broadcast_request.clone(); - node_a.publish_message(broadcast_request).unwrap(); - } - - tokio::select! { - node_a_event = node_a.next_event() => { - if let Some(FuelP2PEvent::NewSubscription { peer_id, .. }) = &node_a_event { - if peer_id == &node_b.local_peer_id { - a_connected_to_b = true; - } - } - tracing::info!("Node A Event: {:?}", node_a_event); - }, - node_b_event = node_b.next_event() => { - if let Some(FuelP2PEvent::NewSubscription { peer_id, .. 
}) = &node_b_event { - if peer_id == &node_c.local_peer_id { - b_connected_to_c = true; - } - } - - if let Some(FuelP2PEvent::GossipsubMessage { topic_hash, message, message_id, peer_id }) = node_b_event.clone() { - // Message Validation must be reported - // If it's `Accept`, Node B will propagate the message to Node C - // If it's `Ignore` or `Reject`, Node C should not receive anything - let msg_acceptance = to_message_acceptance(&acceptance); - node_b.report_message_validation_result(&message_id, peer_id, msg_acceptance); - if topic_hash != selected_topic.hash() { - tracing::error!("Wrong topic hash, expected: {} - actual: {}", selected_topic.hash(), topic_hash); - panic!("Wrong Topic"); - } - - // received value should match sent value - match &message { - GossipsubMessage::NewTx(tx) => { - if tx != &Transaction::default_test_tx() { - tracing::error!("Wrong p2p message {:?}", message); - panic!("Wrong GossipsubMessage") - } - } - } - - // Node B received the correct message - // If we try to publish it again we will get `PublishError::Duplicate` - // This asserts that our MessageId calculation is consistent irrespective of which Peer sends it - let broadcast_request = broadcast_request.clone(); - matches!(node_b.publish_message(broadcast_request), Err(PublishError::Duplicate)); - - match acceptance { - GossipsubMessageAcceptance::Reject | GossipsubMessageAcceptance::Ignore => { - break - }, - _ => { - // the `exit` should happen in Node C - } - } - } - - tracing::info!("Node B Event: {:?}", node_b_event); - } - - node_c_event = node_c.next_event() => { - if let Some(FuelP2PEvent::GossipsubMessage { peer_id, .. 
}) = node_c_event.clone() { - // Node B should be the source propagator - assert!(peer_id == node_b.local_peer_id); - match acceptance { - GossipsubMessageAcceptance::Reject | GossipsubMessageAcceptance::Ignore => { - panic!("Node C should not receive Rejected or Ignored messages") - }, - GossipsubMessageAcceptance::Accept => { - break - } - } - } - } - }; - } - } - - fn arbitrary_headers_for_range(range: Range) -> Vec { - let mut blocks = Vec::new(); - for i in range { - let mut header: BlockHeader = Default::default(); - header.set_block_height(i.into()); - - let sealed_block = SealedBlockHeader { - entity: header, - consensus: Consensus::PoA(PoAConsensus::new(Default::default())), - }; - blocks.push(sealed_block); - } - blocks - } - - // Metadata gets skipped during serialization, so this is the fuzzy way to compare blocks - fn eq_except_metadata(a: &SealedBlockHeader, b: &SealedBlockHeader) -> bool { - let app_eq = match (&a.entity, &b.entity) { - (BlockHeader::V1(a), BlockHeader::V1(b)) => { - a.application() == b.application() - } - #[cfg(feature = "fault-proving")] - (BlockHeader::V2(a), BlockHeader::V2(b)) => { - a.application() == b.application() - } - #[cfg_attr(not(feature = "fault-proving"), allow(unreachable_patterns))] - _ => false, - }; - app_eq && a.entity.consensus() == b.entity.consensus() - } - - async fn request_response_works_with( - request_msg: RequestMessage, - connection_limit: Option, - ) { - let mut p2p_config = Config::default_initialized("request_response_works_with"); - - if let Some(connection_limit) = connection_limit { - p2p_config.max_request_response_peers_connected = connection_limit; - } - - // Node A - let mut node_a = build_service_from_config(p2p_config.clone()).await; - - // Node B - p2p_config.bootstrap_nodes = node_a.multiaddrs(); - let mut node_b = build_service_from_config(p2p_config.clone()).await; - - let (tx_test_end, mut rx_test_end) = mpsc::channel::(1); - - let mut request_sent = false; - - loop { - tokio::select! 
{ - message_sent = rx_test_end.recv() => { - // we received a signal to end the test - assert!(message_sent.unwrap(), "Received incorrect or missing message"); - break; - } - node_a_event = node_a.next_event() => { - if let Some(FuelP2PEvent::PeerInfoUpdated { peer_id, block_height: _ }) = node_a_event { - if node_a.peer_manager.get_peer_info(&peer_id).is_some() { - // 0. verifies that we've got at least a single peer address to request message from - if !request_sent { - request_sent = true; - - match request_msg.clone() { - RequestMessage::SealedHeaders(range) => { - let (tx_orchestrator, rx_orchestrator) = oneshot::channel(); - assert!(node_a.send_request_msg(None, request_msg.clone(), ResponseSender::SealedHeaders(tx_orchestrator)).is_ok()); - let tx_test_end = tx_test_end.clone(); - - tokio::spawn(async move { - let response_message = rx_orchestrator.await; - - let expected = arbitrary_headers_for_range(range.clone()); - - if let Ok(response) = response_message { - match response { - Ok((_, Ok(Ok(sealed_headers)))) => { - let check = expected.iter().zip(sealed_headers.iter()).all(|(a, b)| eq_except_metadata(a, b)); - let _ = tx_test_end.send(check).await; - }, - Ok((_, Ok(Err(e)))) => { - tracing::error!("Node A did not return any headers: {:?}", e); - let _ = tx_test_end.send(false).await; - }, - Ok((_, Err(e))) => { - tracing::error!("Error in P2P communication: {:?}", e); - let _ = tx_test_end.send(false).await; - }, - Err(e) => { - tracing::error!("Error in P2P before sending message: {:?}", e); - let _ = tx_test_end.send(false).await; - }, - } - } else { - tracing::error!("Orchestrator failed to receive a message: {:?}", response_message); - let _ = tx_test_end.send(false).await; - } - }); - } - RequestMessage::Transactions(_range) => { - let (tx_orchestrator, rx_orchestrator) = oneshot::channel(); - assert!(node_a.send_request_msg(None, request_msg.clone(), ResponseSender::Transactions(tx_orchestrator)).is_ok()); - let tx_test_end = tx_test_end.clone(); 
- - tokio::spawn(async move { - let response_message = rx_orchestrator.await; - - if let Ok(response) = response_message { - match response { - Ok((_, Ok(Ok(transactions)))) => { - let check = transactions.len() == 1 && transactions[0].0.len() == 5; - let _ = tx_test_end.send(check).await; - }, - Ok((_, Ok(Err(e)))) => { - tracing::error!("Node A did not return any transactions: {:?}", e); - let _ = tx_test_end.send(false).await; - }, - Ok((_, Err(e))) => { - tracing::error!("Error in P2P communication: {:?}", e); - let _ = tx_test_end.send(false).await; - }, - Err(e) => { - tracing::error!("Error in P2P before sending message: {:?}", e); - let _ = tx_test_end.send(false).await; - }, - } - } else { - tracing::error!("Orchestrator failed to receive a message: {:?}", response_message); - let _ = tx_test_end.send(false).await; - } - }); - } - RequestMessage::TxPoolAllTransactionsIds => { - let (tx_orchestrator, rx_orchestrator) = oneshot::channel(); - assert!(node_a.send_request_msg(None, request_msg.clone(), ResponseSender::TxPoolAllTransactionsIds(tx_orchestrator)).is_ok()); - let tx_test_end = tx_test_end.clone(); - tokio::spawn(async move { - let response_message = rx_orchestrator.await; - - if let Ok((_, Ok(Ok(transaction_ids)))) = response_message { - let tx_ids: Vec = (0..5).map(|_| Transaction::default_test_tx().id(&ChainId::new(1))).collect(); - let check = transaction_ids.len() == 5 && transaction_ids.iter().zip(tx_ids.iter()).all(|(a, b)| a == b); - let _ = tx_test_end.send(check).await; - } else { - tracing::error!("Orchestrator failed to receive a message: {:?}", response_message); - let _ = tx_test_end.send(false).await; - } - }); - } - RequestMessage::TxPoolFullTransactions(tx_ids) => { - let (tx_orchestrator, rx_orchestrator) = oneshot::channel(); - assert!(node_a.send_request_msg(None, request_msg.clone(), ResponseSender::TxPoolFullTransactions(tx_orchestrator)).is_ok()); - let tx_test_end = tx_test_end.clone(); - tokio::spawn(async move { - let 
response_message = rx_orchestrator.await; - - if let Ok((_, Ok(Ok(transactions)))) = response_message { - let txs: Vec> = tx_ids.iter().enumerate().map(|(i, _)| { - if i == 0 { - None - } else { - Some(NetworkableTransactionPool::Transaction(Transaction::default_test_tx())) - } - }).collect(); - let check = transactions.len() == tx_ids.len() && transactions.iter().zip(txs.iter()).all(|(a, b)| a == b); - let _ = tx_test_end.send(check).await; - } else { - tracing::error!("Orchestrator failed to receive a message: {:?}", response_message); - let _ = tx_test_end.send(false).await; - } - }); - } - } - } - } - } - - tracing::info!("Node A Event: {:?}", node_a_event); - }, - node_b_event = node_b.next_event() => { - // 2. Node B receives the RequestMessage from Node A initiated by the NetworkOrchestrator - if let Some(FuelP2PEvent::InboundRequestMessage{ request_id, request_message: received_request_message }) = &node_b_event { - match received_request_message { - RequestMessage::SealedHeaders(range) => { - let sealed_headers: Vec<_> = arbitrary_headers_for_range(range.clone()); - - let _ = node_b.send_response_msg(*request_id, V2ResponseMessage::SealedHeaders(Ok(sealed_headers))); - } - RequestMessage::Transactions(_) => { - let txs = (0..5).map(|_| Transaction::default_test_tx()).collect(); - let transactions = vec![Transactions(txs)]; - let _ = node_b.send_response_msg(*request_id, V2ResponseMessage::Transactions(Ok(transactions))); - } - RequestMessage::TxPoolAllTransactionsIds => { - let tx_ids = (0..5).map(|_| Transaction::default_test_tx().id(&ChainId::new(1))).collect(); - let _ = node_b.send_response_msg(*request_id, V2ResponseMessage::TxPoolAllTransactionsIds(Ok(tx_ids))); - } - RequestMessage::TxPoolFullTransactions(tx_ids) => { - let txs = tx_ids.iter().enumerate().map(|(i, _)| { - if i == 0 { - None - } else { - Some(NetworkableTransactionPool::Transaction(Transaction::default_test_tx())) - } - }).collect(); - let _ = node_b.send_response_msg(*request_id, 
V2ResponseMessage::TxPoolFullTransactions(Ok(txs))); - } - } - } - - tracing::info!("Node B Event: {:?}", node_b_event); - } - }; - } - } - - #[tokio::test] - #[instrument] - async fn request_response_works_with_transactions() { - let arbitrary_range = 2..6; - request_response_works_with(RequestMessage::Transactions(arbitrary_range), None) - .await - } - - #[tokio::test] - #[instrument] - async fn request_response_works_with_sealed_headers_range_inclusive() { - let arbitrary_range = 2..6; - request_response_works_with(RequestMessage::SealedHeaders(arbitrary_range), None) - .await - } - - #[tokio::test] - #[instrument] - async fn request_response_works_with_transactions_ids() { - request_response_works_with(RequestMessage::TxPoolAllTransactionsIds, None).await - } - - #[tokio::test] - #[instrument] - async fn request_response_works_with_full_transactions() { - let tx_ids = (0..10) - .map(|_| Transaction::default_test_tx().id(&ChainId::new(1))) - .collect(); - request_response_works_with(RequestMessage::TxPoolFullTransactions(tx_ids), None) - .await - } - - /// We send a request for transactions, but it's responded by only headers - #[tokio::test] - #[instrument] - async fn invalid_response_type_is_detected() { - let mut p2p_config = - Config::default_initialized("invalid_response_type_is_detected"); - - // Node A - let mut node_a = build_service_from_config(p2p_config.clone()).await; - - // Node B - p2p_config.bootstrap_nodes = node_a.multiaddrs(); - let mut node_b = build_service_from_config(p2p_config.clone()).await; - - let (tx_test_end, mut rx_test_end) = mpsc::channel::(1); - - let mut request_sent = false; - - loop { - tokio::select! 
{ - message_sent = rx_test_end.recv() => { - // we received a signal to end the test - assert!(message_sent.unwrap(), "Received incorrect or missing message"); - break; - } - node_a_event = node_a.next_event() => { - if let Some(FuelP2PEvent::PeerInfoUpdated { peer_id, block_height: _ }) = node_a_event { - if node_a.peer_manager.get_peer_info(&peer_id).is_some() { - // 0. verifies that we've got at least a single peer address to request message from - if !request_sent { - request_sent = true; - - let (tx_orchestrator, rx_orchestrator) = oneshot::channel(); - assert!(node_a.send_request_msg(None, RequestMessage::Transactions(0..2), ResponseSender::Transactions(tx_orchestrator)).is_ok()); - let tx_test_end = tx_test_end.clone(); - - tokio::spawn(async move { - let response_message = rx_orchestrator.await; - - if let Ok(response) = response_message { - match response { - Ok((_, Ok(_))) => { - let _ = tx_test_end.send(false).await; - panic!("Request succeeded unexpectedly"); - }, - Ok((_, Err(ResponseError::TypeMismatch))) => { - // Got Invalid Response Type as expected, so end test - let _ = tx_test_end.send(true).await; - }, - Ok((_, Err(err))) => { - let _ = tx_test_end.send(false).await; - panic!("Unexpected error in P2P communication: {:?}", err); - }, - Err(e) => { - let _ = tx_test_end.send(false).await; - panic!("Error in P2P before sending message: {:?}", e); - }, - } - } else { - let _ = tx_test_end.send(false).await; - panic!("Orchestrator failed to receive a message: {:?}", response_message); - } - }); - } - } - } - - tracing::info!("Node A Event: {:?}", node_a_event); - }, - node_b_event = node_b.next_event() => { - // 2. 
Node B receives the RequestMessage from Node A initiated by the NetworkOrchestrator - if let Some(FuelP2PEvent::InboundRequestMessage{ request_id, request_message: _ }) = &node_b_event { - let sealed_headers: Vec<_> = arbitrary_headers_for_range(1..3); - let _ = node_b.send_response_msg(*request_id, V2ResponseMessage::SealedHeaders(Ok(sealed_headers))); - } - - tracing::info!("Node B Event: {:?}", node_b_event); - } - }; - } - } - - #[tokio::test] - #[instrument] - async fn req_res_outbound_timeout_works() { - let mut p2p_config = - Config::default_initialized("req_res_outbound_timeout_works"); - - // Node A - // setup request timeout to 1ms in order for the Request to fail - p2p_config.set_request_timeout = Duration::from_millis(1); - - let mut node_a = build_service_from_config(p2p_config.clone()).await; - - // Node B - p2p_config.bootstrap_nodes = node_a.multiaddrs(); - p2p_config.set_request_timeout = Duration::from_secs(20); - let mut node_b = build_service_from_config(p2p_config.clone()).await; - - let (tx_test_end, mut rx_test_end) = tokio::sync::mpsc::channel(1); - - // track the request sent in order to avoid duplicate sending - let mut request_sent = false; - - loop { - tokio::select! { - node_a_event = node_a.next_event() => { - if let Some(FuelP2PEvent::PeerInfoUpdated { peer_id, block_height: _ }) = node_a_event { - if node_a.peer_manager.get_peer_info(&peer_id).is_some() { - // 0. verifies that we've got at least a single peer address to request message from - if !request_sent { - request_sent = true; - - // 1. Simulating Oneshot channel from the NetworkOrchestrator - let (tx_orchestrator, rx_orchestrator) = oneshot::channel(); - - // 2a. 
there should be ZERO pending outbound requests in the table - assert_eq!(node_a.outbound_requests_table.len(), 0); - - // Request successfully sent - let requested_block_height = RequestMessage::SealedHeaders(0..0); - assert!(node_a.send_request_msg(None, requested_block_height, ResponseSender::SealedHeaders(tx_orchestrator)).is_ok()); - - // 2b. there should be ONE pending outbound requests in the table - assert_eq!(node_a.outbound_requests_table.len(), 1); - - let tx_test_end = tx_test_end.clone(); - - tokio::spawn(async move { - // 3. Simulating NetworkOrchestrator receiving a Timeout Error Message! - let response_message = rx_orchestrator.await; - if let Ok(response) = response_message { - match response { - Ok((_, Ok(_))) => { - let _ = tx_test_end.send(false).await; - panic!("Request succeeded unexpectedly"); - }, - Ok((_, Err(ResponseError::P2P(_)))) => { - // Got Invalid Response Type as expected, so end test - let _ = tx_test_end.send(true).await; - }, - Ok((_, Err(err))) => { - let _ = tx_test_end.send(false).await; - panic!("Unexpected error in P2P communication: {:?}", err); - }, - Err(e) => { - let _ = tx_test_end.send(false).await; - panic!("Error in P2P before sending message: {:?}", e); - }, - } - } else { - let _ = tx_test_end.send(false).await; - panic!("Orchestrator failed to receive a message: {:?}", response_message); - } - }); - } - } - } - - tracing::info!("Node A Event: {:?}", node_a_event); - }, - recv = rx_test_end.recv() => { - assert_eq!(recv, Some(true), "Test failed"); - // we received a signal to end the test - // 4. 
there should be ZERO pending outbound requests in the table - // after the Outbound Request Failed with Timeout - assert_eq!(node_a.outbound_requests_table.len(), 0); - break; - }, - // will not receive the request at all - node_b_event = node_b.next_event() => { - tracing::info!("Node B Event: {:?}", node_b_event); - } - }; - } - } - - #[tokio::test] - async fn gossipsub_peer_limit_works() { - tokio::time::timeout( - Duration::from_secs(5), - gossipsub_broadcast( - GossipsubBroadcastRequest::NewTx(Arc::new( - Transaction::default_test_tx(), - )), - GossipsubMessageAcceptance::Accept, - Some(1) // limit to 1 peer, therefore the function will timeout, as it will not be able to propagate the message - ), - ) - .await.expect_err("Should have timed out"); - } - - #[tokio::test] - async fn request_response_peer_limit_works() { - let handle = tokio::spawn(async { - let arbitrary_range = 2..6; - - request_response_works_with( - RequestMessage::Transactions(arbitrary_range), - Some(0), // limit to 0 peers, - ) - .await; - }); - - let result = handle.await; - assert!(result.is_err()); - } -} diff --git a/crates/services/p2p/src/p2p_service/tests.rs b/crates/services/p2p/src/p2p_service/tests.rs index 0dcacf7898e..f239a5ad777 100644 --- a/crates/services/p2p/src/p2p_service/tests.rs +++ b/crates/services/p2p/src/p2p_service/tests.rs @@ -6,7 +6,6 @@ use super::{ PublishError, }; use crate::{ - codecs::postcard::PostcardCodec, config::Config, gossipsub::{ messages::{ @@ -19,7 +18,11 @@ use crate::{ TX_CONFIRMATIONS_GOSSIP_TOPIC, }, }, - p2p_service::FuelP2PEvent, + p2p_service::{ + FuelP2PEvent, + GossipsubMessageHandler, + RequestResponseMessageHandler, + }, peer_manager::PeerInfo, request_response::messages::{ RequestMessage, @@ -86,7 +89,6 @@ use tokio::sync::{ watch, }; use tracing_attributes::instrument; - type P2PService = FuelP2PService; /// helper function for building FuelP2PService @@ -96,10 +98,14 @@ async fn build_service_from_config(mut p2p_config: Config) -> 
P2PService { let (sender, _) = broadcast::channel(p2p_config.reserved_nodes.len().saturating_add(1)); - let mut service = - FuelP2PService::new(sender, p2p_config, PostcardCodec::new(max_block_size)) - .await - .unwrap(); + let mut service = FuelP2PService::new( + sender, + p2p_config, + GossipsubMessageHandler::new(), + RequestResponseMessageHandler::new(max_block_size), + ) + .await + .unwrap(); service.start().await.unwrap(); service } @@ -232,10 +238,14 @@ async fn dont_connect_to_node_with_same_peer_id() { let (sender, _) = broadcast::channel(p2p_config.reserved_nodes.len().saturating_add(1)); - let mut service = - FuelP2PService::new(sender, p2p_config, PostcardCodec::new(max_block_size)) - .await - .unwrap(); + let mut service = FuelP2PService::new( + sender, + p2p_config, + GossipsubMessageHandler::new(), + RequestResponseMessageHandler::new(max_block_size), + ) + .await + .unwrap(); service.start().await.unwrap(); service }; diff --git a/crates/services/p2p/src/service.rs b/crates/services/p2p/src/service.rs index 8214df9923a..1ae76a20967 100644 --- a/crates/services/p2p/src/service.rs +++ b/crates/services/p2p/src/service.rs @@ -1454,502 +1454,3 @@ fn report_message( warn!(target: "fuel-p2p", "Failed to read PeerId from received GossipsubMessageId: {}", msg_id); } } - -#[cfg(test)] -pub mod tests { - #![allow(non_snake_case)] - use crate::ports::P2pDb; - - use super::*; - - use crate::peer_manager::heartbeat_data::HeartbeatData; - use fuel_core_services::{ - Service, - State, - }; - use fuel_core_storage::Result as StorageResult; - use fuel_core_types::{ - blockchain::consensus::Genesis, - fuel_types::BlockHeight, - }; - use futures::FutureExt; - use std::{ - collections::VecDeque, - time::SystemTime, - }; - - #[derive(Clone, Debug)] - struct FakeDb; - - impl AtomicView for FakeDb { - type LatestView = Self; - - fn latest_view(&self) -> StorageResult { - Ok(self.clone()) - } - } - - impl P2pDb for FakeDb { - fn get_sealed_headers( - &self, - 
_block_height_range: Range, - ) -> StorageResult>> { - unimplemented!() - } - - fn get_transactions( - &self, - _block_height_range: Range, - ) -> StorageResult>> { - unimplemented!() - } - - fn get_genesis(&self) -> StorageResult { - Ok(Default::default()) - } - } - - #[derive(Clone, Debug)] - struct FakeBlockImporter; - - impl BlockHeightImporter for FakeBlockImporter { - fn next_block_height(&self) -> BoxStream { - Box::pin(fuel_core_services::stream::pending()) - } - } - - #[derive(Clone, Debug)] - struct FakeTxPool; - - impl TxPool for FakeTxPool { - async fn get_tx_ids( - &self, - _max_txs: usize, - ) -> anyhow::Result> { - Ok(vec![]) - } - - async fn get_full_txs( - &self, - tx_ids: Vec, - ) -> anyhow::Result>> { - Ok(tx_ids.iter().map(|_| None).collect()) - } - } - - #[tokio::test] - async fn start_and_stop_awaits_works() { - let p2p_config = Config::::default("start_stop_works"); - let (shared_state, request_receiver) = build_shared_state(p2p_config.clone()); - let service = new_service( - ChainId::default(), - 0.into(), - p2p_config, - shared_state, - request_receiver, - FakeDb, - FakeBlockImporter, - FakeTxPool, - ); - - // Node with p2p service started - assert!(service.start_and_await().await.unwrap().started()); - // Node with p2p service stopped - assert!(service.stop_and_await().await.unwrap().stopped()); - } - - struct FakeP2PService { - peer_info: Vec<(PeerId, PeerInfo)>, - next_event_stream: BoxStream, - } - - impl TaskP2PService for FakeP2PService { - fn update_metrics(&self, _: T) - where - T: FnOnce(), - { - unimplemented!() - } - - fn get_all_peer_info(&self) -> Vec<(&PeerId, &PeerInfo)> { - self.peer_info.iter().map(|tup| (&tup.0, &tup.1)).collect() - } - - fn get_peer_id_with_height(&self, _height: &BlockHeight) -> Option { - todo!() - } - - fn next_event(&mut self) -> BoxFuture<'_, Option> { - self.next_event_stream.next().boxed() - } - - fn publish_message( - &mut self, - _message: GossipsubBroadcastRequest, - ) -> anyhow::Result<()> { - 
todo!() - } - - fn send_request_msg( - &mut self, - _peer_id: Option, - _request_msg: RequestMessage, - _on_response: ResponseSender, - ) -> anyhow::Result<()> { - todo!() - } - - fn send_response_msg( - &mut self, - _request_id: InboundRequestId, - _message: V2ResponseMessage, - ) -> anyhow::Result<()> { - todo!() - } - - fn report_message( - &mut self, - _message: GossipsubMessageInfo, - _acceptance: GossipsubMessageAcceptance, - ) -> anyhow::Result<()> { - todo!() - } - - fn report_peer( - &mut self, - _peer_id: PeerId, - _score: AppScore, - _reporting_service: &str, - ) -> anyhow::Result<()> { - todo!() - } - - fn update_block_height(&mut self, _height: BlockHeight) -> anyhow::Result<()> { - Ok(()) - } - } - - #[derive(Clone)] - struct FakeDB; - - impl AtomicView for FakeDB { - type LatestView = Self; - - fn latest_view(&self) -> StorageResult { - Ok(self.clone()) - } - } - - impl P2pDb for FakeDB { - fn get_sealed_headers( - &self, - _block_height_range: Range, - ) -> StorageResult>> { - todo!() - } - - fn get_transactions( - &self, - _block_height_range: Range, - ) -> StorageResult>> { - todo!() - } - - fn get_genesis(&self) -> StorageResult { - todo!() - } - } - - struct FakeBroadcast { - pub peer_reports: mpsc::Sender<(FuelPeerId, AppScore, String)>, - } - - impl Broadcast for FakeBroadcast { - fn report_peer( - &self, - peer_id: FuelPeerId, - report: AppScore, - reporting_service: &'static str, - ) -> anyhow::Result<()> { - self.peer_reports.try_send(( - peer_id, - report, - reporting_service.to_string(), - ))?; - Ok(()) - } - - fn block_height_broadcast( - &self, - _block_height_data: BlockHeightHeartbeatData, - ) -> anyhow::Result<()> { - todo!() - } - - fn tx_broadcast( - &self, - _transaction: TransactionGossipData, - ) -> anyhow::Result<()> { - todo!() - } - - fn new_tx_subscription_broadcast( - &self, - _peer_id: FuelPeerId, - ) -> anyhow::Result<()> { - todo!() - } - } - - #[tokio::test] - async fn 
peer_heartbeat_reputation_checks__slow_heartbeat_sends_reports() { - // given - let peer_id = PeerId::random(); - // more than limit - let last_duration = Duration::from_secs(30); - let mut durations = VecDeque::new(); - durations.push_front(last_duration); - - let heartbeat_data = HeartbeatData { - block_height: None, - last_heartbeat: Instant::now(), - last_heartbeat_sys: SystemTime::now(), - window: 0, - durations, - }; - let peer_info = PeerInfo { - peer_addresses: Default::default(), - client_version: None, - heartbeat_data, - score: 100.0, - }; - let peer_info = vec![(peer_id, peer_info)]; - let p2p_service = FakeP2PService { - peer_info, - next_event_stream: Box::pin(futures::stream::pending()), - }; - let (request_sender, request_receiver) = mpsc::channel(100); - - let (report_sender, mut report_receiver) = mpsc::channel(100); - let broadcast = FakeBroadcast { - peer_reports: report_sender, - }; - - // Less than actual - let heartbeat_max_avg_interval = Duration::from_secs(20); - // Greater than actual - let heartbeat_max_time_since_last = Duration::from_secs(40); - - // Arbitrary values - let heartbeat_peer_reputation_config = HeartbeatPeerReputationConfig { - old_heartbeat_penalty: 5.6, - low_heartbeat_frequency_penalty: 20.45, - }; - - let mut task = Task { - chain_id: Default::default(), - response_timeout: Default::default(), - p2p_service, - view_provider: FakeDB, - next_block_height: FakeBlockImporter.next_block_height(), - tx_pool: FakeTxPool, - request_receiver, - request_sender, - db_heavy_task_processor: SyncProcessor::new("Test", 1, 1).unwrap(), - tx_pool_heavy_task_processor: AsyncProcessor::new("Test", 1, 1).unwrap(), - broadcast, - max_headers_per_request: 0, - max_txs_per_request: 100, - heartbeat_check_interval: Duration::from_secs(0), - heartbeat_max_avg_interval, - heartbeat_max_time_since_last, - next_check_time: Instant::now(), - heartbeat_peer_reputation_config: heartbeat_peer_reputation_config.clone(), - cached_view: 
Arc::new(CachedView::new(100, false)), - }; - let (watch_sender, watch_receiver) = tokio::sync::watch::channel(State::Started); - let mut watcher = StateWatcher::from(watch_receiver); - - // when - let (report_peer_id, report, reporting_service) = tokio::time::timeout( - Duration::from_secs(1), - wait_until_report_received(&mut report_receiver, &mut task, &mut watcher), - ) - .await - .unwrap(); - - // then - watch_sender.send(State::Stopped).unwrap(); - - assert_eq!( - FuelPeerId::from(peer_id.to_bytes().to_vec()), - report_peer_id - ); - assert_eq!( - report, - heartbeat_peer_reputation_config.low_heartbeat_frequency_penalty - ); - assert_eq!(reporting_service, "p2p"); - } - - #[tokio::test] - async fn peer_heartbeat_reputation_checks__old_heartbeat_sends_reports() { - // given - let peer_id = PeerId::random(); - // under the limit - let last_duration = Duration::from_secs(5); - let last_heartbeat = Instant::now() - Duration::from_secs(50); - let last_heartbeat_sys = SystemTime::now() - Duration::from_secs(50); - let mut durations = VecDeque::new(); - durations.push_front(last_duration); - - let heartbeat_data = HeartbeatData { - block_height: None, - last_heartbeat, - last_heartbeat_sys, - window: 0, - durations, - }; - let peer_info = PeerInfo { - peer_addresses: Default::default(), - client_version: None, - heartbeat_data, - score: 100.0, - }; - let peer_info = vec![(peer_id, peer_info)]; - let p2p_service = FakeP2PService { - peer_info, - next_event_stream: Box::pin(futures::stream::pending()), - }; - let (request_sender, request_receiver) = mpsc::channel(100); - - let (report_sender, mut report_receiver) = mpsc::channel(100); - let broadcast = FakeBroadcast { - peer_reports: report_sender, - }; - - // Greater than actual - let heartbeat_max_avg_interval = Duration::from_secs(20); - // Less than actual - let heartbeat_max_time_since_last = Duration::from_secs(40); - - // Arbitrary values - let heartbeat_peer_reputation_config = HeartbeatPeerReputationConfig { 
- old_heartbeat_penalty: 5.6, - low_heartbeat_frequency_penalty: 20.45, - }; - - let mut task = Task { - chain_id: Default::default(), - response_timeout: Default::default(), - p2p_service, - view_provider: FakeDB, - tx_pool: FakeTxPool, - next_block_height: FakeBlockImporter.next_block_height(), - request_receiver, - request_sender, - db_heavy_task_processor: SyncProcessor::new("Test", 1, 1).unwrap(), - tx_pool_heavy_task_processor: AsyncProcessor::new("Test", 1, 1).unwrap(), - broadcast, - max_headers_per_request: 0, - max_txs_per_request: 100, - heartbeat_check_interval: Duration::from_secs(0), - heartbeat_max_avg_interval, - heartbeat_max_time_since_last, - next_check_time: Instant::now(), - heartbeat_peer_reputation_config: heartbeat_peer_reputation_config.clone(), - cached_view: Arc::new(CachedView::new(100, false)), - }; - let (watch_sender, watch_receiver) = tokio::sync::watch::channel(State::Started); - let mut watcher = StateWatcher::from(watch_receiver); - - // when - // we run this in a loop to ensure that the task is run until it reports - let (report_peer_id, report, reporting_service) = tokio::time::timeout( - Duration::from_secs(1), - wait_until_report_received(&mut report_receiver, &mut task, &mut watcher), - ) - .await - .unwrap(); - - // then - watch_sender.send(State::Stopped).unwrap(); - - assert_eq!( - FuelPeerId::from(peer_id.to_bytes().to_vec()), - report_peer_id - ); - assert_eq!( - report, - heartbeat_peer_reputation_config.old_heartbeat_penalty - ); - assert_eq!(reporting_service, "p2p"); - } - - async fn wait_until_report_received( - report_receiver: &mut Receiver<(FuelPeerId, AppScore, String)>, - task: &mut Task, - watcher: &mut StateWatcher, - ) -> (FuelPeerId, AppScore, String) { - loop { - let _ = task.run(watcher).await; - if let Ok((peer_id, recv_report, service)) = report_receiver.try_recv() { - return (peer_id, recv_report, service); - } - } - } - - #[tokio::test] - async fn 
should_process_all_imported_block_under_infinite_events_from_p2p() { - // Given - let (blocks_processed_sender, mut block_processed_receiver) = mpsc::channel(1); - let next_block_height = Box::pin(futures::stream::repeat_with(move || { - blocks_processed_sender.try_send(()).unwrap(); - BlockHeight::from(0) - })); - let infinite_event_stream = Box::pin(futures::stream::empty()); - let p2p_service = FakeP2PService { - peer_info: vec![], - next_event_stream: infinite_event_stream, - }; - - // Initialization - let (request_sender, request_receiver) = mpsc::channel(100); - let broadcast = FakeBroadcast { - peer_reports: mpsc::channel(100).0, - }; - let mut task = Task { - chain_id: Default::default(), - response_timeout: Default::default(), - p2p_service, - tx_pool: FakeTxPool, - view_provider: FakeDB, - next_block_height, - request_receiver, - request_sender, - db_heavy_task_processor: SyncProcessor::new("Test", 1, 1).unwrap(), - tx_pool_heavy_task_processor: AsyncProcessor::new("Test", 1, 1).unwrap(), - broadcast, - max_headers_per_request: 0, - max_txs_per_request: 100, - heartbeat_check_interval: Duration::from_secs(0), - heartbeat_max_avg_interval: Default::default(), - heartbeat_max_time_since_last: Default::default(), - next_check_time: Instant::now(), - heartbeat_peer_reputation_config: Default::default(), - cached_view: Arc::new(CachedView::new(100, false)), - }; - let mut watcher = StateWatcher::started(); - // End of initialization - - for _ in 0..100 { - // When - let _ = task.run(&mut watcher).await; - - // Then - block_processed_receiver - .try_recv() - .expect("Should process the block height even under p2p pressure"); - } - } -} From 89e44b096dc133725622b60d05b35d4737250676 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Thu, 20 Feb 2025 15:34:41 -0700 Subject: [PATCH 15/20] More renaming --- crates/services/p2p/src/gossipsub/config.rs | 8 ++++---- crates/services/p2p/src/gossipsub/topics.rs | 2 +- crates/services/p2p/src/p2p_service/tests.rs | 4 ++-- 
crates/services/p2p/src/service/task_tests.rs | 4 ++-- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/crates/services/p2p/src/gossipsub/config.rs b/crates/services/p2p/src/gossipsub/config.rs index 69314ff0065..9eb8c1dfa15 100644 --- a/crates/services/p2p/src/gossipsub/config.rs +++ b/crates/services/p2p/src/gossipsub/config.rs @@ -1,6 +1,6 @@ use super::topics::{ NEW_TX_GOSSIP_TOPIC, - TX_CONFIRMATIONS_GOSSIP_TOPIC, + TX_PRECONFIRMATIONS_GOSSIP_TOPIC, }; use crate::{ config::{ @@ -52,7 +52,7 @@ const MESH_SIZE: usize = 8; // The weight applied to the score for delivering new transactions. const NEW_TX_GOSSIP_WEIGHT: f64 = 0.05; -const TX_CONFIRMATIONS_GOSSIP_WEIGHT: f64 = 0.05; +const TX_PRECONFIRMATIONS_GOSSIP_WEIGHT: f64 = 0.05; // The threshold for a peer's score to be considered for greylisting. // If a peer's score falls below this value, they will be greylisted. @@ -230,8 +230,8 @@ fn initialize_gossipsub(gossipsub: &mut gossipsub::Behaviour, p2p_config: &Confi let mut topics = vec![(NEW_TX_GOSSIP_TOPIC, NEW_TX_GOSSIP_WEIGHT)]; if p2p_config.subscribe_to_pre_confirmations { topics.push(( - TX_CONFIRMATIONS_GOSSIP_TOPIC, - TX_CONFIRMATIONS_GOSSIP_WEIGHT, + TX_PRECONFIRMATIONS_GOSSIP_TOPIC, + TX_PRECONFIRMATIONS_GOSSIP_WEIGHT, )); } diff --git a/crates/services/p2p/src/gossipsub/topics.rs b/crates/services/p2p/src/gossipsub/topics.rs index 7ee2b1da62a..49859468f17 100644 --- a/crates/services/p2p/src/gossipsub/topics.rs +++ b/crates/services/p2p/src/gossipsub/topics.rs @@ -10,7 +10,7 @@ use super::messages::{ }; pub const NEW_TX_GOSSIP_TOPIC: &str = "new_tx"; -pub const TX_CONFIRMATIONS_GOSSIP_TOPIC: &str = "tx_confirmations"; +pub const TX_PRECONFIRMATIONS_GOSSIP_TOPIC: &str = "tx_confirmations"; /// Holds used Gossipsub Topics /// Each field contains TopicHash of existing topics diff --git a/crates/services/p2p/src/p2p_service/tests.rs b/crates/services/p2p/src/p2p_service/tests.rs index f239a5ad777..cb18a89f460 100644 --- 
a/crates/services/p2p/src/p2p_service/tests.rs +++ b/crates/services/p2p/src/p2p_service/tests.rs @@ -15,7 +15,7 @@ use crate::{ }, topics::{ NEW_TX_GOSSIP_TOPIC, - TX_CONFIRMATIONS_GOSSIP_TOPIC, + TX_PRECONFIRMATIONS_GOSSIP_TOPIC, }, }, p2p_service::{ @@ -810,7 +810,7 @@ async fn gossipsub_broadcast( (NEW_TX_GOSSIP_TOPIC, GossipTopicTag::NewTx) } GossipsubBroadcastRequest::TxPreConfirmations(_) => ( - TX_CONFIRMATIONS_GOSSIP_TOPIC, + TX_PRECONFIRMATIONS_GOSSIP_TOPIC, GossipTopicTag::TxPreConfirmations, ), }; diff --git a/crates/services/p2p/src/service/task_tests.rs b/crates/services/p2p/src/service/task_tests.rs index a15280d6c89..7c7b3e56a95 100644 --- a/crates/services/p2p/src/service/task_tests.rs +++ b/crates/services/p2p/src/service/task_tests.rs @@ -4,7 +4,7 @@ use crate::ports::P2pDb; use super::*; use crate::{ - gossipsub::topics::TX_CONFIRMATIONS_GOSSIP_TOPIC, + gossipsub::topics::TX_PRECONFIRMATIONS_GOSSIP_TOPIC, peer_manager::heartbeat_data::HeartbeatData, }; use fuel_core_services::{ @@ -505,7 +505,7 @@ async fn should_process_all_imported_block_under_infinite_events_from_p2p() { fn arb_tx_confirmation_gossip_message() -> FuelP2PEvent { let peer_id = PeerId::random(); let message_id = vec![1, 2, 3, 4, 5].into(); - let topic_hash = TopicHash::from_raw(TX_CONFIRMATIONS_GOSSIP_TOPIC); + let topic_hash = TopicHash::from_raw(TX_PRECONFIRMATIONS_GOSSIP_TOPIC); let confirmations = PreConfirmationMessage::default_test_confirmation(); let message = GossipsubMessage::TxPreConfirmations(confirmations); FuelP2PEvent::GossipsubMessage { From 6748d0cbe2a6d8a21c99e21ee798eadd6107815c Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Thu, 20 Feb 2025 15:39:38 -0700 Subject: [PATCH 16/20] Rename missed const --- crates/services/p2p/src/gossipsub/topics.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/crates/services/p2p/src/gossipsub/topics.rs b/crates/services/p2p/src/gossipsub/topics.rs index 49859468f17..8e2d585bd38 100644 --- 
a/crates/services/p2p/src/gossipsub/topics.rs +++ b/crates/services/p2p/src/gossipsub/topics.rs @@ -26,8 +26,7 @@ impl GossipsubTopics { let new_tx_topic: Sha256Topic = Topic::new(format!("{NEW_TX_GOSSIP_TOPIC}/{network_name}")); let tx_confirmations_topic: Sha256Topic = - Topic::new(format!("{TX_CONFIRMATIONS_GOSSIP_TOPIC}/{network_name}")); - + Topic::new(format!("{TX_PRECONFIRMATIONS_GOSSIP_TOPIC}/{network_name}")); Self { new_tx_topic: new_tx_topic.hash(), tx_confirmations_topic: tx_confirmations_topic.hash(), From 5cbe2187d2ab0e359be79c824525268ae866e387 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Sun, 23 Feb 2025 12:06:20 -0700 Subject: [PATCH 17/20] Remove test refactor --- crates/services/p2p/src/p2p_service.rs | 1407 ++++++++++++++++- crates/services/p2p/src/p2p_service/tests.rs | 1392 ---------------- crates/services/p2p/src/service.rs | 626 +++++++- .../p2p/src/service/broadcast_tests.rs | 32 - crates/services/p2p/src/service/task_tests.rs | 571 ------- 5 files changed, 2024 insertions(+), 2004 deletions(-) delete mode 100644 crates/services/p2p/src/p2p_service/tests.rs delete mode 100644 crates/services/p2p/src/service/broadcast_tests.rs delete mode 100644 crates/services/p2p/src/service/task_tests.rs diff --git a/crates/services/p2p/src/p2p_service.rs b/crates/services/p2p/src/p2p_service.rs index 638cdc7c46d..bb3e6533111 100644 --- a/crates/services/p2p/src/p2p_service.rs +++ b/crates/services/p2p/src/p2p_service.rs @@ -87,9 +87,6 @@ use tracing::{ warn, }; -#[cfg(test)] -mod tests; - /// Maximum amount of peer's addresses that we are ready to store per peer const MAX_IDENTIFY_ADDRESSES: usize = 10; @@ -903,3 +900,1407 @@ impl FuelP2PService { }) } } + +#[cfg(test)] +mod tests { + #![allow(non_snake_case)] + #![allow(clippy::cast_possible_truncation)] + + use super::{ + FuelP2PService, + PublishError, + }; + use crate::{ + config::Config, + gossipsub::{ + messages::{ + GossipTopicTag, + GossipsubBroadcastRequest, + GossipsubMessage, + }, + 
topics::{ + NEW_TX_GOSSIP_TOPIC, + TX_PRECONFIRMATIONS_GOSSIP_TOPIC, + }, + }, + p2p_service::{ + FuelP2PEvent, + GossipsubMessageHandler, + RequestResponseMessageHandler, + }, + peer_manager::PeerInfo, + request_response::messages::{ + RequestMessage, + ResponseError, + ResponseSender, + V2ResponseMessage, + }, + service::to_message_acceptance, + }; + use fuel_core_types::{ + blockchain::{ + consensus::{ + poa::PoAConsensus, + Consensus, + }, + header::BlockHeader, + SealedBlockHeader, + }, + fuel_tx::{ + Transaction, + TransactionBuilder, + TxId, + UniqueIdentifier, + }, + fuel_types::ChainId, + services::p2p::{ + GossipsubMessageAcceptance, + NetworkableTransactionPool, + PreConfirmationMessage, + Transactions, + }, + }; + use futures::{ + future::join_all, + StreamExt, + }; + use libp2p::{ + gossipsub::{ + Sha256Topic, + Topic, + }, + identity::Keypair, + swarm::{ + ListenError, + SwarmEvent, + }, + Multiaddr, + PeerId, + }; + use rand::Rng; + use std::{ + collections::HashSet, + ops::{ + Deref, + Range, + }, + sync::Arc, + time::Duration, + }; + use tokio::sync::{ + broadcast, + mpsc, + oneshot, + watch, + }; + use tracing_attributes::instrument; + type P2PService = FuelP2PService; + + /// helper function for building FuelP2PService + async fn build_service_from_config(mut p2p_config: Config) -> P2PService { + p2p_config.keypair = Keypair::generate_secp256k1(); // change keypair for each Node + let max_block_size = p2p_config.max_block_size; + let (sender, _) = + broadcast::channel(p2p_config.reserved_nodes.len().saturating_add(1)); + + let mut service = FuelP2PService::new( + sender, + p2p_config, + GossipsubMessageHandler::new(), + RequestResponseMessageHandler::new(max_block_size), + ) + .await + .unwrap(); + service.start().await.unwrap(); + service + } + + async fn setup_bootstrap_nodes( + p2p_config: &Config, + bootstrap_nodes_count: usize, + ) -> (Vec, Vec) { + let nodes = join_all( + (0..bootstrap_nodes_count) + .map(|_| 
build_service_from_config(p2p_config.clone())), + ) + .await; + let bootstrap_multiaddrs = nodes + .iter() + .flat_map(|b| b.multiaddrs()) + .collect::>(); + (nodes, bootstrap_multiaddrs) + } + + fn spawn(stop: &watch::Sender<()>, mut node: P2PService) { + let mut stop = stop.subscribe(); + tokio::spawn(async move { + loop { + tokio::select! { + _ = node.next_event() => {} + _ = stop.changed() => { + break; + } + } + } + }); + } + + #[tokio::test] + #[instrument] + async fn p2p_service_works() { + build_service_from_config(Config::default_initialized("p2p_service_works")).await; + } + + // Single sentry node connects to multiple reserved nodes and `max_peers_allowed` amount of non-reserved nodes. + // It also tries to dial extra non-reserved nodes to establish the connection. + // A single reserved node is not started immediately with the rest of the nodes. + // Once sentry node establishes the connection with the allowed number of nodes + // we start the reserved node, and await for it to establish the connection. + // This test proves that there is always an available slot for the reserved node to connect to. 
+ #[tokio::test(flavor = "multi_thread")] + #[instrument] + async fn reserved_nodes_reconnect_works() { + let p2p_config = Config::default_initialized("reserved_nodes_reconnect_works"); + + // total amount will be `max_peers_allowed` + `reserved_nodes.len()` + let max_peers_allowed: usize = 3; + + let (bootstrap_nodes, bootstrap_multiaddrs) = + setup_bootstrap_nodes(&p2p_config, max_peers_allowed.saturating_mul(5)).await; + let (mut reserved_nodes, reserved_multiaddrs) = + setup_bootstrap_nodes(&p2p_config, max_peers_allowed).await; + + let mut sentry_node = { + let mut p2p_config = p2p_config.clone(); + p2p_config.max_discovery_peers_connected = max_peers_allowed as u32; + + p2p_config.bootstrap_nodes = bootstrap_multiaddrs; + + p2p_config.reserved_nodes = reserved_multiaddrs; + + build_service_from_config(p2p_config).await + }; + + // pop() a single reserved node, so it's not run with the rest of the nodes + let mut reserved_node = reserved_nodes.pop(); + let reserved_node_peer_id = reserved_node.as_ref().unwrap().local_peer_id; + + let all_node_services: Vec<_> = bootstrap_nodes + .into_iter() + .chain(reserved_nodes.into_iter()) + .collect(); + + let mut all_nodes_ids: Vec = all_node_services + .iter() + .map(|service| service.local_peer_id) + .collect(); + + let (stop_sender, _) = watch::channel(()); + all_node_services.into_iter().for_each(|node| { + spawn(&stop_sender, node); + }); + + loop { + tokio::select! 
{ + sentry_node_event = sentry_node.next_event() => { + // we've connected to all other peers + if sentry_node.peer_manager.total_peers_connected() > max_peers_allowed { + // if the `reserved_node` is not included, + // create and insert it, to be polled with rest of the nodes + if !all_nodes_ids + .iter() + .any(|local_peer_id| local_peer_id == &reserved_node_peer_id) { + if let Some(node) = reserved_node { + all_nodes_ids.push(node.local_peer_id); + spawn(&stop_sender, node); + reserved_node = None; + } + } + } + if let Some(FuelP2PEvent::PeerConnected(peer_id)) = sentry_node_event { + // we connected to the desired reserved node + if peer_id == reserved_node_peer_id { + break + } + } + }, + } + } + stop_sender.send(()).unwrap(); + } + + #[tokio::test] + #[instrument] + async fn dont_connect_to_node_with_same_peer_id() { + let mut p2p_config = + Config::default_initialized("dont_connect_to_node_with_same_peer_id"); + let mut node_a = build_service_from_config(p2p_config.clone()).await; + // We don't use build_service_from_config here, because we want to use the same keypair + // to have the same PeerId + let node_b = { + // Given + p2p_config.reserved_nodes = node_a.multiaddrs(); + let max_block_size = p2p_config.max_block_size; + let (sender, _) = + broadcast::channel(p2p_config.reserved_nodes.len().saturating_add(1)); + + let mut service = FuelP2PService::new( + sender, + p2p_config, + GossipsubMessageHandler::new(), + RequestResponseMessageHandler::new(max_block_size), + ) + .await + .unwrap(); + service.start().await.unwrap(); + service + }; + // When + tokio::time::timeout(Duration::from_secs(5), async move { + loop { + let event = node_a.next_event().await; + if let Some(FuelP2PEvent::PeerConnected(_)) = event { + panic!("Node B should not connect to Node A because they have the same PeerId"); + } + assert_eq!(node_a.peer_manager().total_peers_connected(), 0); + } + }) + .await + // Then + .expect_err("The node should not connect to itself"); + 
assert_eq!(node_b.peer_manager().total_peers_connected(), 0); + } + + // We start with two nodes, node_a and node_b, bootstrapped with `bootstrap_nodes_count` other nodes. + // Yet node_a and node_b are only allowed to connect to specified amount of nodes. + #[tokio::test] + #[instrument] + async fn max_peers_connected_works() { + let p2p_config = Config::default_initialized("max_peers_connected_works"); + + let bootstrap_nodes_count = 20; + let node_a_max_peers_allowed: usize = 3; + let node_b_max_peers_allowed: usize = 5; + + let (mut nodes, nodes_multiaddrs) = + setup_bootstrap_nodes(&p2p_config, bootstrap_nodes_count).await; + + // this node is allowed to only connect to `node_a_max_peers_allowed` other nodes + let mut node_a = { + let mut p2p_config = p2p_config.clone(); + p2p_config.max_discovery_peers_connected = node_a_max_peers_allowed as u32; + // it still tries to dial all nodes! + p2p_config.bootstrap_nodes.clone_from(&nodes_multiaddrs); + + build_service_from_config(p2p_config).await + }; + + // this node is allowed to only connect to `node_b_max_peers_allowed` other nodes + let mut node_b = { + let mut p2p_config = p2p_config.clone(); + p2p_config.max_discovery_peers_connected = node_b_max_peers_allowed as u32; + // it still tries to dial all nodes! + p2p_config.bootstrap_nodes.clone_from(&nodes_multiaddrs); + + build_service_from_config(p2p_config).await + }; + + let (tx, mut rx) = tokio::sync::oneshot::channel::<()>(); + let jh = tokio::spawn(async move { + while rx.try_recv().is_err() { + futures::stream::iter(nodes.iter_mut()) + .for_each_concurrent(4, |node| async move { + node.next_event().await; + }) + .await; + } + }); + + let mut node_a_hit_limit = false; + let mut node_b_hit_limit = false; + let mut instance = tokio::time::Instant::now(); + + // After we hit limit for node_a and node_b start timer. + // If we don't exceed the limit during 5 seconds, finish the test successfully. + while instance.elapsed().as_secs() < 5 { + tokio::select! 
{ + event_from_node_a = node_a.next_event() => { + if let Some(FuelP2PEvent::PeerConnected(_)) = event_from_node_a { + if node_a.peer_manager().total_peers_connected() > node_a_max_peers_allowed { + panic!("The node should only connect to max {node_a_max_peers_allowed} peers"); + } + node_a_hit_limit |= node_a.peer_manager().total_peers_connected() == node_a_max_peers_allowed; + } + tracing::info!("Event from the node_a: {:?}", event_from_node_a); + }, + event_from_node_b = node_b.next_event() => { + if let Some(FuelP2PEvent::PeerConnected(_)) = event_from_node_b { + if node_b.peer_manager().total_peers_connected() > node_b_max_peers_allowed { + panic!("The node should only connect to max {node_b_max_peers_allowed} peers"); + } + node_b_hit_limit |= node_b.peer_manager().total_peers_connected() == node_b_max_peers_allowed; + } + tracing::info!("Event from the node_b: {:?}", event_from_node_b); + }, + } + + if !(node_a_hit_limit && node_b_hit_limit) { + instance = tokio::time::Instant::now(); + } + } + + tx.send(()).unwrap(); + jh.await.unwrap() + } + + // Simulate 2 Sets of Sentry nodes. + // In both Sets, a single Guarded Node should only be connected to their sentry nodes. + // While other nodes can and should connect to nodes outside of the Sentry Set. 
+ #[tokio::test(flavor = "multi_thread")] + #[instrument] + async fn sentry_nodes_working() { + const RESERVED_NODE_SIZE: usize = 4; + + let mut p2p_config = Config::default_initialized("sentry_nodes_working"); + + async fn build_sentry_nodes(p2p_config: Config) -> (P2PService, Vec) { + let (reserved_nodes, reserved_multiaddrs) = + setup_bootstrap_nodes(&p2p_config, RESERVED_NODE_SIZE).await; + + // set up the guarded node service with `reserved_nodes_only_mode` + let guarded_node_service = { + let mut p2p_config = p2p_config.clone(); + p2p_config.reserved_nodes = reserved_multiaddrs; + p2p_config.reserved_nodes_only_mode = true; + build_service_from_config(p2p_config).await + }; + + let sentry_nodes = reserved_nodes; + + (guarded_node_service, sentry_nodes) + } + + let (mut first_guarded_node, mut first_sentry_nodes) = + build_sentry_nodes(p2p_config.clone()).await; + p2p_config.bootstrap_nodes = first_sentry_nodes + .iter() + .flat_map(|n| n.multiaddrs()) + .collect(); + + let (mut second_guarded_node, second_sentry_nodes) = + build_sentry_nodes(p2p_config).await; + + let first_sentry_set: HashSet<_> = first_sentry_nodes + .iter() + .map(|node| node.local_peer_id) + .collect(); + + let second_sentry_set: HashSet<_> = second_sentry_nodes + .iter() + .map(|node| node.local_peer_id) + .collect(); + + let mut single_sentry_node = first_sentry_nodes.pop().unwrap(); + let mut sentry_node_connections = HashSet::new(); + let (stop_sender, _) = watch::channel(()); + first_sentry_nodes + .into_iter() + .chain(second_sentry_nodes.into_iter()) + .for_each(|node| { + spawn(&stop_sender, node); + }); + + let mut instance = tokio::time::Instant::now(); + // After guards are connected to all sentries and at least one sentry has + // more connections than sentries in the group, start the timer. + // If guards don't connect to new nodes during 5 seconds, finish the test successfully. + while instance.elapsed().as_secs() < 5 { + tokio::select! 
{ + event_from_first_guarded = first_guarded_node.next_event() => { + if let Some(FuelP2PEvent::PeerConnected(peer_id)) = event_from_first_guarded { + if !first_sentry_set.contains(&peer_id) { + panic!("The node should only connect to the specified reserved nodes!"); + } + } + tracing::info!("Event from the first guarded node: {:?}", event_from_first_guarded); + }, + event_from_second_guarded = second_guarded_node.next_event() => { + if let Some(FuelP2PEvent::PeerConnected(peer_id)) = event_from_second_guarded { + if !second_sentry_set.contains(&peer_id) { + panic!("The node should only connect to the specified reserved nodes!"); + } + } + tracing::info!("Event from the second guarded node: {:?}", event_from_second_guarded); + }, + // Poll one of the reserved, sentry nodes + sentry_node_event = single_sentry_node.next_event() => { + if let Some(FuelP2PEvent::PeerConnected(peer_id)) = sentry_node_event { + sentry_node_connections.insert(peer_id); + } + } + }; + + // This reserved node has connected to more than the number of reserved nodes it is part of. + // It means it has discovered other nodes in the network. + if sentry_node_connections.len() < 2 * RESERVED_NODE_SIZE { + instance = tokio::time::Instant::now(); + } + } + stop_sender.send(()).unwrap(); + } + + // Simulates 2 p2p nodes that are on the same network and should connect via mDNS + // without any additional bootstrapping + #[tokio::test] + #[instrument] + async fn nodes_connected_via_mdns() { + // Node A + let mut p2p_config = Config::default_initialized("nodes_connected_via_mdns"); + p2p_config.enable_mdns = true; + let mut node_a = build_service_from_config(p2p_config.clone()).await; + + // Node B + let mut node_b = build_service_from_config(p2p_config).await; + + loop { + tokio::select! 
{ + node_b_event = node_b.next_event() => { + if let Some(FuelP2PEvent::PeerConnected(_)) = node_b_event { + // successfully connected to Node A + break + } + tracing::info!("Node B Event: {:?}", node_b_event); + }, + _ = node_a.swarm.select_next_some() => {}, + }; + } + } + + // Simulates 2 p2p nodes that are on the same network but their Fuel Upgrade checksum is different + // (different chain id or chain config) + // So they are not able to connect + #[tokio::test] + #[instrument] + async fn nodes_cannot_connect_due_to_different_checksum() { + use libp2p::TransportError; + // Node A + let mut p2p_config = + Config::default_initialized("nodes_cannot_connect_due_to_different_checksum"); + let mut node_a = build_service_from_config(p2p_config.clone()).await; + + // different checksum + p2p_config.checksum = [1u8; 32].into(); + p2p_config.bootstrap_nodes = node_a.multiaddrs(); + // Node B + let mut node_b = build_service_from_config(p2p_config).await; + + loop { + tokio::select! { + node_a_event = node_a.swarm.select_next_some() => { + tracing::info!("Node A Event: {:?}", node_a_event); + if let SwarmEvent::IncomingConnectionError { error: ListenError::Transport(TransportError::Other(_)), .. 
} = node_a_event { + break + } + }, + node_b_event = node_b.next_event() => { + if let Some(FuelP2PEvent::PeerConnected(_)) = node_b_event { + panic!("Node B should not connect to Node A!") + } + tracing::info!("Node B Event: {:?}", node_b_event); + }, + + }; + } + } + + // Simulates 3 p2p nodes, Node B & Node C are bootstrapped with Node A + // Using Identify Protocol Node C should be able to identify and connect to Node B + #[tokio::test] + #[instrument] + async fn nodes_connected_via_identify() { + // Node A + let mut p2p_config = Config::default_initialized("nodes_connected_via_identify"); + + let mut node_a = build_service_from_config(p2p_config.clone()).await; + + // Node B + p2p_config.bootstrap_nodes = node_a.multiaddrs(); + let mut node_b = build_service_from_config(p2p_config.clone()).await; + + // Node C + let mut node_c = build_service_from_config(p2p_config).await; + + loop { + tokio::select! { + node_a_event = node_a.next_event() => { + tracing::info!("Node A Event: {:?}", node_a_event); + }, + node_b_event = node_b.next_event() => { + tracing::info!("Node B Event: {:?}", node_b_event); + }, + + node_c_event = node_c.next_event() => { + if let Some(FuelP2PEvent::PeerConnected(peer_id)) = node_c_event { + // we have connected to Node B! 
+ if peer_id == node_b.local_peer_id { + break + } + } + + tracing::info!("Node C Event: {:?}", node_c_event); + } + }; + } + } + + // Simulates 2 p2p nodes that connect to each other and consequently exchange Peer Info + // On successful connection, node B updates its latest BlockHeight + // and shares it with Peer A via Heartbeat protocol + #[tokio::test] + #[instrument] + async fn peer_info_updates_work() { + let mut p2p_config = Config::default_initialized("peer_info_updates_work"); + + // Node A + let mut node_a = build_service_from_config(p2p_config.clone()).await; + + // Node B + p2p_config.bootstrap_nodes = node_a.multiaddrs(); + let mut node_b = build_service_from_config(p2p_config).await; + + let latest_block_height = 40_u32.into(); + + loop { + tokio::select! { + node_a_event = node_a.next_event() => { + if let Some(FuelP2PEvent::PeerInfoUpdated { peer_id, block_height: _ }) = node_a_event { + if let Some(PeerInfo { heartbeat_data, client_version, .. }) = node_a.peer_manager.get_peer_info(&peer_id) { + // Exits after it verifies that: + // 1. Peer Addresses are known + // 2. Client Version is known + // 3. 
Node has responded with their latest BlockHeight + if client_version.is_some() && heartbeat_data.block_height == Some(latest_block_height) { + break; + } + } + } + + tracing::info!("Node A Event: {:?}", node_a_event); + }, + node_b_event = node_b.next_event() => { + if let Some(FuelP2PEvent::PeerConnected(_)) = node_b_event { + // we've connected to Peer A + // let's update our BlockHeight + node_b.update_block_height(latest_block_height); + } + + tracing::info!("Node B Event: {:?}", node_b_event); + } + } + } + } + + #[tokio::test] + #[instrument] + async fn gossipsub_broadcast_tx_with_accept__new_tx() { + for _ in 0..100 { + tokio::time::timeout( + Duration::from_secs(5), + gossipsub_broadcast( + GossipsubBroadcastRequest::NewTx(Arc::new( + Transaction::default_test_tx(), + )), + GossipsubMessageAcceptance::Accept, + None, + ), + ) + .await + .unwrap(); + } + } + + #[tokio::test] + #[instrument] + async fn gossipsub_broadcast_tx_with_accept__tx_confirmations() { + for _ in 0..100 { + tokio::time::timeout( + Duration::from_secs(20), + gossipsub_broadcast( + GossipsubBroadcastRequest::TxPreConfirmations(Arc::new( + PreConfirmationMessage::default_test_confirmation(), + )), + GossipsubMessageAcceptance::Accept, + None, + ), + ) + .await + .unwrap(); + } + } + + #[tokio::test] + #[instrument] + async fn gossipsub_broadcast_tx_with_reject__new_tx() { + for _ in 0..100 { + tokio::time::timeout( + Duration::from_secs(5), + gossipsub_broadcast( + GossipsubBroadcastRequest::NewTx(Arc::new( + Transaction::default_test_tx(), + )), + GossipsubMessageAcceptance::Reject, + None, + ), + ) + .await + .unwrap(); + } + } + + #[tokio::test] + #[instrument] + async fn gossipsub_broadcast_tx_with_reject__tx_confirmations() { + for _ in 0..100 { + tokio::time::timeout( + Duration::from_secs(5), + gossipsub_broadcast( + GossipsubBroadcastRequest::TxPreConfirmations(Arc::new( + PreConfirmationMessage::default_test_confirmation(), + )), + GossipsubMessageAcceptance::Reject, + None, + ), 
+ ) + .await + .unwrap(); + } + } + + #[tokio::test] + #[instrument] + #[ignore] + async fn gossipsub_scoring_with_accepted_messages() { + gossipsub_scoring_tester( + "gossipsub_scoring_with_accepted_messages", + 100, + GossipsubMessageAcceptance::Accept, + ) + .await; + } + + /// At `GRAYLIST_THRESHOLD` the node will ignore all messages from the peer + /// And our PeerManager will ban the peer at that point - leading to disconnect + #[tokio::test] + #[instrument] + #[ignore] + async fn gossipsub_scoring_with_rejected_messages() { + gossipsub_scoring_tester( + "gossipsub_scoring_with_rejected_messages", + 100, + GossipsubMessageAcceptance::Reject, + ) + .await; + } + + // TODO: Move me before tests that use this function + /// Helper function for testing gossipsub scoring + /// ! Dev Note: this function runs forever, its purpose is to show the scoring in action with passage of time + async fn gossipsub_scoring_tester( + test_name: &str, + amount_of_msgs_per_second: usize, + acceptance: GossipsubMessageAcceptance, + ) { + let mut p2p_config = Config::default_initialized(test_name); + + // Node A + let mut node_a = build_service_from_config(p2p_config.clone()).await; + + // Node B + p2p_config.bootstrap_nodes = node_a.multiaddrs(); + let mut node_b = build_service_from_config(p2p_config.clone()).await; + + // Node C + p2p_config.bootstrap_nodes = node_b.multiaddrs(); + let mut node_c = build_service_from_config(p2p_config.clone()).await; + + let mut interval = tokio::time::interval(Duration::from_secs(1)); + + loop { + tokio::select! { + node_a_event = node_a.next_event() => { + if let Some(FuelP2PEvent::GossipsubMessage { message_id, peer_id, .. }) = node_a_event { + let msg_acceptance = to_message_acceptance(&acceptance); + node_a.report_message_validation_result(&message_id, peer_id, msg_acceptance); + } + } + node_b_event = node_b.next_event() => { + if let Some(FuelP2PEvent::GossipsubMessage { message_id, peer_id, .. 
}) = node_b_event { + let msg_acceptance = to_message_acceptance(&acceptance); + node_b.report_message_validation_result(&message_id, peer_id, msg_acceptance); + } + }, + node_c_event = node_c.next_event() => { + if let Some(FuelP2PEvent::GossipsubMessage { message_id, peer_id, .. }) = node_c_event { + let msg_acceptance = to_message_acceptance(&acceptance); + node_c.report_message_validation_result(&message_id, peer_id, msg_acceptance); + } + }, + _ = interval.tick() => { + let mut transactions = vec![]; + for _ in 0..amount_of_msgs_per_second { + let random_tx = + TransactionBuilder::script(rand::thread_rng().gen::<[u8; 32]>().to_vec(), rand::thread_rng().gen::<[u8; 32]>().to_vec()).finalize_as_transaction(); + + transactions.push(random_tx.clone()); + let random_tx = GossipsubBroadcastRequest::NewTx(Arc::new(random_tx)); + + match rand::thread_rng().gen_range(1..=3) { + 1 => { + // Node A sends a Transaction + let _ = node_a.publish_message(random_tx); + + }, + 2 => { + // Node B sends a Transaction + let _ = node_b.publish_message(random_tx); + + }, + 3 => { + // Node C sends a Transaction + let _ = node_c.publish_message(random_tx); + }, + _ => unreachable!("Random number generator is broken") + } + } + + eprintln!("Node A WORLD VIEW"); + eprintln!("B score: {:?}", node_a.get_peer_score(&node_b.local_peer_id).unwrap()); + eprintln!("C score: {:?}", node_a.get_peer_score(&node_c.local_peer_id).unwrap()); + eprintln!(); + + eprintln!("Node B WORLD VIEW"); + eprintln!("A score: {:?}", node_b.get_peer_score(&node_a.local_peer_id).unwrap()); + eprintln!("C score: {:?}", node_b.get_peer_score(&node_c.local_peer_id).unwrap()); + eprintln!(); + + eprintln!("Node C WORLD VIEW"); + eprintln!("A score: {:?}", node_c.get_peer_score(&node_a.local_peer_id).unwrap()); + eprintln!("B score: {:?}", node_c.get_peer_score(&node_b.local_peer_id).unwrap()); + eprintln!(); + + // never ending loop + // break; + } + } + } + } + + // TODO: Move me before tests that use this function 
+ /// Reusable helper function for Broadcasting Gossipsub requests + async fn gossipsub_broadcast( + broadcast_request: GossipsubBroadcastRequest, + acceptance: GossipsubMessageAcceptance, + connection_limit: Option, + ) { + let mut p2p_config = Config::default_initialized("gossipsub_exchanges_messages"); + + if let Some(connection_limit) = connection_limit { + p2p_config.max_gossipsub_peers_connected = connection_limit; + } + + p2p_config.subscribe_to_pre_confirmations = true; + + let (selected_topic, selected_tag): (Sha256Topic, GossipTopicTag) = { + let (topic, tag) = match broadcast_request { + GossipsubBroadcastRequest::NewTx(_) => { + (NEW_TX_GOSSIP_TOPIC, GossipTopicTag::NewTx) + } + GossipsubBroadcastRequest::TxPreConfirmations(_) => ( + TX_PRECONFIRMATIONS_GOSSIP_TOPIC, + GossipTopicTag::TxPreConfirmations, + ), + }; + + ( + Topic::new(format!("{}/{}", topic, p2p_config.network_name)), + tag, + ) + }; + tracing::info!("Selected Topic: {:?}", selected_topic); + + let mut message_sent = false; + + // Node A + let mut node_a = build_service_from_config(p2p_config.clone()).await; + + // Node B + p2p_config.bootstrap_nodes = node_a.multiaddrs(); + let mut node_b = build_service_from_config(p2p_config.clone()).await; + + // Node C + p2p_config.bootstrap_nodes = node_b.multiaddrs(); + let mut node_c = build_service_from_config(p2p_config.clone()).await; + + // Node C does not connect to Node A + // it should receive the propagated message from Node B if `GossipsubMessageAcceptance` is `Accept` + node_c + .swarm + .behaviour_mut() + .block_peer(node_a.local_peer_id); + + let mut a_connected_to_b = false; + let mut b_connected_to_c = false; + loop { + // verifies that we've got at least a single peer address to send message to + if a_connected_to_b && b_connected_to_c && !message_sent { + message_sent = true; + let broadcast_request = broadcast_request.clone(); + node_a.publish_message(broadcast_request).unwrap(); + } + + tokio::select! 
{ + node_a_event = node_a.next_event() => { + if let Some(FuelP2PEvent::NewSubscription { peer_id, tag }) = &node_a_event { + if tag != &selected_tag { + tracing::info!("Wrong tag, expected: {:?}, actual: {:?}", selected_tag, tag); + } else if peer_id == &node_b.local_peer_id { + a_connected_to_b = true; + } + } + tracing::info!("Node A Event: {:?}", node_a_event); + }, + node_b_event = node_b.next_event() => { + if let Some(FuelP2PEvent::NewSubscription { peer_id,tag, }) = &node_b_event { + tracing::info!("New subscription for peer_id: {:?} with tag: {:?}", peer_id, tag); + if tag != &selected_tag { + tracing::info!("Wrong tag, expected: {:?}, actual: {:?}", selected_tag, tag); + } else if peer_id == &node_c.local_peer_id { + b_connected_to_c = true; + } + } + + if let Some(FuelP2PEvent::GossipsubMessage { topic_hash, message, message_id, peer_id }) = node_b_event.clone() { + // Message Validation must be reported + // If it's `Accept`, Node B will propagate the message to Node C + // If it's `Ignore` or `Reject`, Node C should not receive anything + let msg_acceptance = to_message_acceptance(&acceptance); + node_b.report_message_validation_result(&message_id, peer_id, msg_acceptance); + if topic_hash != selected_topic.hash() { + tracing::error!("Wrong topic hash, expected: {} - actual: {}", selected_topic.hash(), topic_hash); + panic!("Wrong Topic"); + } + + check_message_matches_request(&message, &broadcast_request); + + // Node B received the correct message + // If we try to publish it again we will get `PublishError::Duplicate` + // This asserts that our MessageId calculation is consistent irrespective of which Peer sends it + let broadcast_request = broadcast_request.clone(); + matches!(node_b.publish_message(broadcast_request), Err(PublishError::Duplicate)); + + match acceptance { + GossipsubMessageAcceptance::Reject | GossipsubMessageAcceptance::Ignore => { + break + }, + _ => { + // the `exit` should happen in Node C + } + } + } + + tracing::info!("Node B 
Event: {:?}", node_b_event); + } + + node_c_event = node_c.next_event() => { + if let Some(FuelP2PEvent::GossipsubMessage { peer_id, .. }) = node_c_event.clone() { + // Node B should be the source propagator + assert!(peer_id == node_b.local_peer_id); + match acceptance { + GossipsubMessageAcceptance::Reject | GossipsubMessageAcceptance::Ignore => { + panic!("Node C should not receive Rejected or Ignored messages") + }, + GossipsubMessageAcceptance::Accept => { + break + } + } + } + } + }; + } + } + + fn check_message_matches_request( + message: &GossipsubMessage, + expected: &GossipsubBroadcastRequest, + ) { + match (message, expected) { + (GossipsubMessage::NewTx(received), GossipsubBroadcastRequest::NewTx(requested)) => { + assert_eq!(requested.deref(), received, "Both messages were `NewTx`s, but the received message did not match the requested message"); + } + ( + GossipsubMessage::TxPreConfirmations(received), + GossipsubBroadcastRequest::TxPreConfirmations(requested), + ) => assert_eq!(requested.deref(), received, "Both messages were `Confirmations`, but the received message did not match the requested message"), + _ => panic!("Message does not match the expected request, expected: {:?}, actual: {:?}", expected, message), + } + } + + fn arbitrary_headers_for_range(range: Range) -> Vec { + let mut blocks = Vec::new(); + for i in range { + let mut header: BlockHeader = Default::default(); + header.set_block_height(i.into()); + + let sealed_block = SealedBlockHeader { + entity: header, + consensus: Consensus::PoA(PoAConsensus::new(Default::default())), + }; + blocks.push(sealed_block); + } + blocks + } + + // Metadata gets skipped during serialization, so this is the fuzzy way to compare blocks + fn eq_except_metadata(a: &SealedBlockHeader, b: &SealedBlockHeader) -> bool { + let app_eq = match (&a.entity, &b.entity) { + (BlockHeader::V1(a), BlockHeader::V1(b)) => { + a.application() == b.application() + } + #[cfg(feature = "fault-proving")] + 
(BlockHeader::V2(a), BlockHeader::V2(b)) => { + a.application() == b.application() + } + #[cfg_attr(not(feature = "fault-proving"), allow(unreachable_patterns))] + _ => false, + }; + app_eq && a.entity.consensus() == b.entity.consensus() + } + + async fn request_response_works_with( + request_msg: RequestMessage, + connection_limit: Option, + ) { + let mut p2p_config = Config::default_initialized("request_response_works_with"); + + if let Some(connection_limit) = connection_limit { + p2p_config.max_request_response_peers_connected = connection_limit; + } + + // Node A + let mut node_a = build_service_from_config(p2p_config.clone()).await; + + // Node B + p2p_config.bootstrap_nodes = node_a.multiaddrs(); + let mut node_b = build_service_from_config(p2p_config.clone()).await; + + let (tx_test_end, mut rx_test_end) = mpsc::channel::(1); + + let mut request_sent = false; + + loop { + tokio::select! { + message_sent = rx_test_end.recv() => { + // we received a signal to end the test + assert!(message_sent.unwrap(), "Received incorrect or missing message"); + break; + } + node_a_event = node_a.next_event() => { + if let Some(FuelP2PEvent::PeerInfoUpdated { peer_id, block_height: _ }) = node_a_event { + if node_a.peer_manager.get_peer_info(&peer_id).is_some() { + // 0. 
verifies that we've got at least a single peer address to request message from + if !request_sent { + request_sent = true; + + match request_msg.clone() { + RequestMessage::SealedHeaders(range) => { + let (tx_orchestrator, rx_orchestrator) = oneshot::channel(); + assert!(node_a.send_request_msg(None, request_msg.clone(), ResponseSender::SealedHeaders(tx_orchestrator)).is_ok()); + let tx_test_end = tx_test_end.clone(); + + tokio::spawn(async move { + let response_message = rx_orchestrator.await; + + let expected = arbitrary_headers_for_range(range.clone()); + + if let Ok(response) = response_message { + match response { + Ok((_, Ok(Ok(sealed_headers)))) => { + let check = expected.iter().zip(sealed_headers.iter()).all(|(a, b)| eq_except_metadata(a, b)); + let _ = tx_test_end.send(check).await; + }, + Ok((_, Ok(Err(e)))) => { + tracing::error!("Node A did not return any headers: {:?}", e); + let _ = tx_test_end.send(false).await; + }, + Ok((_, Err(e))) => { + tracing::error!("Error in P2P communication: {:?}", e); + let _ = tx_test_end.send(false).await; + }, + Err(e) => { + tracing::error!("Error in P2P before sending message: {:?}", e); + let _ = tx_test_end.send(false).await; + }, + } + } else { + tracing::error!("Orchestrator failed to receive a message: {:?}", response_message); + let _ = tx_test_end.send(false).await; + } + }); + } + RequestMessage::Transactions(_range) => { + let (tx_orchestrator, rx_orchestrator) = oneshot::channel(); + assert!(node_a.send_request_msg(None, request_msg.clone(), ResponseSender::Transactions(tx_orchestrator)).is_ok()); + let tx_test_end = tx_test_end.clone(); + + tokio::spawn(async move { + let response_message = rx_orchestrator.await; + + if let Ok(response) = response_message { + match response { + Ok((_, Ok(Ok(transactions)))) => { + let check = transactions.len() == 1 && transactions[0].0.len() == 5; + let _ = tx_test_end.send(check).await; + }, + Ok((_, Ok(Err(e)))) => { + tracing::error!("Node A did not return any 
transactions: {:?}", e); + let _ = tx_test_end.send(false).await; + }, + Ok((_, Err(e))) => { + tracing::error!("Error in P2P communication: {:?}", e); + let _ = tx_test_end.send(false).await; + }, + Err(e) => { + tracing::error!("Error in P2P before sending message: {:?}", e); + let _ = tx_test_end.send(false).await; + }, + } + } else { + tracing::error!("Orchestrator failed to receive a message: {:?}", response_message); + let _ = tx_test_end.send(false).await; + } + }); + } + RequestMessage::TxPoolAllTransactionsIds => { + let (tx_orchestrator, rx_orchestrator) = oneshot::channel(); + assert!(node_a.send_request_msg(None, request_msg.clone(), ResponseSender::TxPoolAllTransactionsIds(tx_orchestrator)).is_ok()); + let tx_test_end = tx_test_end.clone(); + tokio::spawn(async move { + let response_message = rx_orchestrator.await; + + if let Ok((_, Ok(Ok(transaction_ids)))) = response_message { + let tx_ids: Vec = (0..5).map(|_| Transaction::default_test_tx().id(&ChainId::new(1))).collect(); + let check = transaction_ids.len() == 5 && transaction_ids.iter().zip(tx_ids.iter()).all(|(a, b)| a == b); + let _ = tx_test_end.send(check).await; + } else { + tracing::error!("Orchestrator failed to receive a message: {:?}", response_message); + let _ = tx_test_end.send(false).await; + } + }); + } + RequestMessage::TxPoolFullTransactions(tx_ids) => { + let (tx_orchestrator, rx_orchestrator) = oneshot::channel(); + assert!(node_a.send_request_msg(None, request_msg.clone(), ResponseSender::TxPoolFullTransactions(tx_orchestrator)).is_ok()); + let tx_test_end = tx_test_end.clone(); + tokio::spawn(async move { + let response_message = rx_orchestrator.await; + + if let Ok((_, Ok(Ok(transactions)))) = response_message { + let txs: Vec> = tx_ids.iter().enumerate().map(|(i, _)| { + if i == 0 { + None + } else { + Some(NetworkableTransactionPool::Transaction(Transaction::default_test_tx())) + } + }).collect(); + let check = transactions.len() == tx_ids.len() && 
transactions.iter().zip(txs.iter()).all(|(a, b)| a == b); + let _ = tx_test_end.send(check).await; + } else { + tracing::error!("Orchestrator failed to receive a message: {:?}", response_message); + let _ = tx_test_end.send(false).await; + } + }); + } + } + } + } + } + + tracing::info!("Node A Event: {:?}", node_a_event); + }, + node_b_event = node_b.next_event() => { + // 2. Node B receives the RequestMessage from Node A initiated by the NetworkOrchestrator + if let Some(FuelP2PEvent::InboundRequestMessage{ request_id, request_message: received_request_message }) = &node_b_event { + match received_request_message { + RequestMessage::SealedHeaders(range) => { + let sealed_headers: Vec<_> = arbitrary_headers_for_range(range.clone()); + + let _ = node_b.send_response_msg(*request_id, V2ResponseMessage::SealedHeaders(Ok(sealed_headers))); + } + RequestMessage::Transactions(_) => { + let txs = (0..5).map(|_| Transaction::default_test_tx()).collect(); + let transactions = vec![Transactions(txs)]; + let _ = node_b.send_response_msg(*request_id, V2ResponseMessage::Transactions(Ok(transactions))); + } + RequestMessage::TxPoolAllTransactionsIds => { + let tx_ids = (0..5).map(|_| Transaction::default_test_tx().id(&ChainId::new(1))).collect(); + let _ = node_b.send_response_msg(*request_id, V2ResponseMessage::TxPoolAllTransactionsIds(Ok(tx_ids))); + } + RequestMessage::TxPoolFullTransactions(tx_ids) => { + let txs = tx_ids.iter().enumerate().map(|(i, _)| { + if i == 0 { + None + } else { + Some(NetworkableTransactionPool::Transaction(Transaction::default_test_tx())) + } + }).collect(); + let _ = node_b.send_response_msg(*request_id, V2ResponseMessage::TxPoolFullTransactions(Ok(txs))); + } + } + } + + tracing::info!("Node B Event: {:?}", node_b_event); + } + }; + } + } + + #[tokio::test] + #[instrument] + async fn request_response_works_with_transactions() { + let arbitrary_range = 2..6; + request_response_works_with(RequestMessage::Transactions(arbitrary_range), None) + 
.await + } + + #[tokio::test] + #[instrument] + async fn request_response_works_with_sealed_headers_range_inclusive() { + let arbitrary_range = 2..6; + request_response_works_with(RequestMessage::SealedHeaders(arbitrary_range), None) + .await + } + + #[tokio::test] + #[instrument] + async fn request_response_works_with_transactions_ids() { + request_response_works_with(RequestMessage::TxPoolAllTransactionsIds, None).await + } + + #[tokio::test] + #[instrument] + async fn request_response_works_with_full_transactions() { + let tx_ids = (0..10) + .map(|_| Transaction::default_test_tx().id(&ChainId::new(1))) + .collect(); + request_response_works_with(RequestMessage::TxPoolFullTransactions(tx_ids), None) + .await + } + + /// We send a request for transactions, but it's responded by only headers + #[tokio::test] + #[instrument] + async fn invalid_response_type_is_detected() { + let mut p2p_config = + Config::default_initialized("invalid_response_type_is_detected"); + + // Node A + let mut node_a = build_service_from_config(p2p_config.clone()).await; + + // Node B + p2p_config.bootstrap_nodes = node_a.multiaddrs(); + let mut node_b = build_service_from_config(p2p_config.clone()).await; + + let (tx_test_end, mut rx_test_end) = mpsc::channel::(1); + + let mut request_sent = false; + + loop { + tokio::select! { + message_sent = rx_test_end.recv() => { + // we received a signal to end the test + assert!(message_sent.unwrap(), "Received incorrect or missing message"); + break; + } + node_a_event = node_a.next_event() => { + if let Some(FuelP2PEvent::PeerInfoUpdated { peer_id, block_height: _ }) = node_a_event { + if node_a.peer_manager.get_peer_info(&peer_id).is_some() { + // 0. 
verifies that we've got at least a single peer address to request message from + if !request_sent { + request_sent = true; + + let (tx_orchestrator, rx_orchestrator) = oneshot::channel(); + assert!(node_a.send_request_msg(None, RequestMessage::Transactions(0..2), ResponseSender::Transactions(tx_orchestrator)).is_ok()); + let tx_test_end = tx_test_end.clone(); + + tokio::spawn(async move { + let response_message = rx_orchestrator.await; + + if let Ok(response) = response_message { + match response { + Ok((_, Ok(_))) => { + let _ = tx_test_end.send(false).await; + panic!("Request succeeded unexpectedly"); + }, + Ok((_, Err(ResponseError::TypeMismatch))) => { + // Got Invalid Response Type as expected, so end test + let _ = tx_test_end.send(true).await; + }, + Ok((_, Err(err))) => { + let _ = tx_test_end.send(false).await; + panic!("Unexpected error in P2P communication: {:?}", err); + }, + Err(e) => { + let _ = tx_test_end.send(false).await; + panic!("Error in P2P before sending message: {:?}", e); + }, + } + } else { + let _ = tx_test_end.send(false).await; + panic!("Orchestrator failed to receive a message: {:?}", response_message); + } + }); + } + } + } + + tracing::info!("Node A Event: {:?}", node_a_event); + }, + node_b_event = node_b.next_event() => { + // 2. 
Node B receives the RequestMessage from Node A initiated by the NetworkOrchestrator + if let Some(FuelP2PEvent::InboundRequestMessage{ request_id, request_message: _ }) = &node_b_event { + let sealed_headers: Vec<_> = arbitrary_headers_for_range(1..3); + let _ = node_b.send_response_msg(*request_id, V2ResponseMessage::SealedHeaders(Ok(sealed_headers))); + } + + tracing::info!("Node B Event: {:?}", node_b_event); + } + }; + } + } + + #[tokio::test] + #[instrument] + async fn req_res_outbound_timeout_works() { + let mut p2p_config = + Config::default_initialized("req_res_outbound_timeout_works"); + + // Node A + // setup request timeout to 1ms in order for the Request to fail + p2p_config.set_request_timeout = Duration::from_millis(1); + + let mut node_a = build_service_from_config(p2p_config.clone()).await; + + // Node B + p2p_config.bootstrap_nodes = node_a.multiaddrs(); + p2p_config.set_request_timeout = Duration::from_secs(20); + let mut node_b = build_service_from_config(p2p_config.clone()).await; + + let (tx_test_end, mut rx_test_end) = tokio::sync::mpsc::channel(1); + + // track the request sent in order to avoid duplicate sending + let mut request_sent = false; + + loop { + tokio::select! { + node_a_event = node_a.next_event() => { + if let Some(FuelP2PEvent::PeerInfoUpdated { peer_id, block_height: _ }) = node_a_event { + if node_a.peer_manager.get_peer_info(&peer_id).is_some() { + // 0. verifies that we've got at least a single peer address to request message from + if !request_sent { + request_sent = true; + + // 1. Simulating Oneshot channel from the NetworkOrchestrator + let (tx_orchestrator, rx_orchestrator) = oneshot::channel(); + + // 2a. 
there should be ZERO pending outbound requests in the table + assert_eq!(node_a.outbound_requests_table.len(), 0); + + // Request successfully sent + let requested_block_height = RequestMessage::SealedHeaders(0..0); + assert!(node_a.send_request_msg(None, requested_block_height, ResponseSender::SealedHeaders(tx_orchestrator)).is_ok()); + + // 2b. there should be ONE pending outbound requests in the table + assert_eq!(node_a.outbound_requests_table.len(), 1); + + let tx_test_end = tx_test_end.clone(); + + tokio::spawn(async move { + // 3. Simulating NetworkOrchestrator receiving a Timeout Error Message! + let response_message = rx_orchestrator.await; + if let Ok(response) = response_message { + match response { + Ok((_, Ok(_))) => { + let _ = tx_test_end.send(false).await; + panic!("Request succeeded unexpectedly"); + }, + Ok((_, Err(ResponseError::P2P(_)))) => { + // Got Invalid Response Type as expected, so end test + let _ = tx_test_end.send(true).await; + }, + Ok((_, Err(err))) => { + let _ = tx_test_end.send(false).await; + panic!("Unexpected error in P2P communication: {:?}", err); + }, + Err(e) => { + let _ = tx_test_end.send(false).await; + panic!("Error in P2P before sending message: {:?}", e); + }, + } + } else { + let _ = tx_test_end.send(false).await; + panic!("Orchestrator failed to receive a message: {:?}", response_message); + } + }); + } + } + } + + tracing::info!("Node A Event: {:?}", node_a_event); + }, + recv = rx_test_end.recv() => { + assert_eq!(recv, Some(true), "Test failed"); + // we received a signal to end the test + // 4. 
there should be ZERO pending outbound requests in the table + // after the Outbound Request Failed with Timeout + assert_eq!(node_a.outbound_requests_table.len(), 0); + break; + }, + // will not receive the request at all + node_b_event = node_b.next_event() => { + tracing::info!("Node B Event: {:?}", node_b_event); + } + }; + } + } + + #[tokio::test] + async fn gossipsub_peer_limit_works() { + tokio::time::timeout( + Duration::from_secs(5), + gossipsub_broadcast( + GossipsubBroadcastRequest::NewTx(Arc::new( + Transaction::default_test_tx(), + )), + GossipsubMessageAcceptance::Accept, + Some(1) // limit to 1 peer, therefore the function will timeout, as it will not be able to propagate the message + ), + ) + .await.expect_err("Should have timed out"); + } + + #[tokio::test] + async fn request_response_peer_limit_works() { + let handle = tokio::spawn(async { + let arbitrary_range = 2..6; + + request_response_works_with( + RequestMessage::Transactions(arbitrary_range), + Some(0), // limit to 0 peers, + ) + .await; + }); + + let result = handle.await; + assert!(result.is_err()); + } +} diff --git a/crates/services/p2p/src/p2p_service/tests.rs b/crates/services/p2p/src/p2p_service/tests.rs deleted file mode 100644 index cb18a89f460..00000000000 --- a/crates/services/p2p/src/p2p_service/tests.rs +++ /dev/null @@ -1,1392 +0,0 @@ -#![allow(non_snake_case)] -#![allow(clippy::cast_possible_truncation)] - -use super::{ - FuelP2PService, - PublishError, -}; -use crate::{ - config::Config, - gossipsub::{ - messages::{ - GossipTopicTag, - GossipsubBroadcastRequest, - GossipsubMessage, - }, - topics::{ - NEW_TX_GOSSIP_TOPIC, - TX_PRECONFIRMATIONS_GOSSIP_TOPIC, - }, - }, - p2p_service::{ - FuelP2PEvent, - GossipsubMessageHandler, - RequestResponseMessageHandler, - }, - peer_manager::PeerInfo, - request_response::messages::{ - RequestMessage, - ResponseError, - ResponseSender, - V2ResponseMessage, - }, - service::to_message_acceptance, -}; -use fuel_core_types::{ - blockchain::{ - 
consensus::{ - poa::PoAConsensus, - Consensus, - }, - header::BlockHeader, - SealedBlockHeader, - }, - fuel_tx::{ - Transaction, - TransactionBuilder, - TxId, - UniqueIdentifier, - }, - fuel_types::ChainId, - services::p2p::{ - GossipsubMessageAcceptance, - NetworkableTransactionPool, - PreConfirmationMessage, - Transactions, - }, -}; -use futures::{ - future::join_all, - StreamExt, -}; -use libp2p::{ - gossipsub::{ - Sha256Topic, - Topic, - }, - identity::Keypair, - swarm::{ - ListenError, - SwarmEvent, - }, - Multiaddr, - PeerId, -}; -use rand::Rng; -use std::{ - collections::HashSet, - ops::{ - Deref, - Range, - }, - sync::Arc, - time::Duration, -}; -use tokio::sync::{ - broadcast, - mpsc, - oneshot, - watch, -}; -use tracing_attributes::instrument; -type P2PService = FuelP2PService; - -/// helper function for building FuelP2PService -async fn build_service_from_config(mut p2p_config: Config) -> P2PService { - p2p_config.keypair = Keypair::generate_secp256k1(); // change keypair for each Node - let max_block_size = p2p_config.max_block_size; - let (sender, _) = - broadcast::channel(p2p_config.reserved_nodes.len().saturating_add(1)); - - let mut service = FuelP2PService::new( - sender, - p2p_config, - GossipsubMessageHandler::new(), - RequestResponseMessageHandler::new(max_block_size), - ) - .await - .unwrap(); - service.start().await.unwrap(); - service -} - -async fn setup_bootstrap_nodes( - p2p_config: &Config, - bootstrap_nodes_count: usize, -) -> (Vec, Vec) { - let nodes = join_all( - (0..bootstrap_nodes_count).map(|_| build_service_from_config(p2p_config.clone())), - ) - .await; - let bootstrap_multiaddrs = nodes - .iter() - .flat_map(|b| b.multiaddrs()) - .collect::>(); - (nodes, bootstrap_multiaddrs) -} - -fn spawn(stop: &watch::Sender<()>, mut node: P2PService) { - let mut stop = stop.subscribe(); - tokio::spawn(async move { - loop { - tokio::select! 
{ - _ = node.next_event() => {} - _ = stop.changed() => { - break; - } - } - } - }); -} - -#[tokio::test] -#[instrument] -async fn p2p_service_works() { - build_service_from_config(Config::default_initialized("p2p_service_works")).await; -} - -// Single sentry node connects to multiple reserved nodes and `max_peers_allowed` amount of non-reserved nodes. -// It also tries to dial extra non-reserved nodes to establish the connection. -// A single reserved node is not started immediately with the rest of the nodes. -// Once sentry node establishes the connection with the allowed number of nodes -// we start the reserved node, and await for it to establish the connection. -// This test proves that there is always an available slot for the reserved node to connect to. -#[tokio::test(flavor = "multi_thread")] -#[instrument] -async fn reserved_nodes_reconnect_works() { - let p2p_config = Config::default_initialized("reserved_nodes_reconnect_works"); - - // total amount will be `max_peers_allowed` + `reserved_nodes.len()` - let max_peers_allowed: usize = 3; - - let (bootstrap_nodes, bootstrap_multiaddrs) = - setup_bootstrap_nodes(&p2p_config, max_peers_allowed.saturating_mul(5)).await; - let (mut reserved_nodes, reserved_multiaddrs) = - setup_bootstrap_nodes(&p2p_config, max_peers_allowed).await; - - let mut sentry_node = { - let mut p2p_config = p2p_config.clone(); - p2p_config.max_discovery_peers_connected = max_peers_allowed as u32; - - p2p_config.bootstrap_nodes = bootstrap_multiaddrs; - - p2p_config.reserved_nodes = reserved_multiaddrs; - - build_service_from_config(p2p_config).await - }; - - // pop() a single reserved node, so it's not run with the rest of the nodes - let mut reserved_node = reserved_nodes.pop(); - let reserved_node_peer_id = reserved_node.as_ref().unwrap().local_peer_id; - - let all_node_services: Vec<_> = bootstrap_nodes - .into_iter() - .chain(reserved_nodes.into_iter()) - .collect(); - - let mut all_nodes_ids: Vec = all_node_services - .iter() - 
.map(|service| service.local_peer_id) - .collect(); - - let (stop_sender, _) = watch::channel(()); - all_node_services.into_iter().for_each(|node| { - spawn(&stop_sender, node); - }); - - loop { - tokio::select! { - sentry_node_event = sentry_node.next_event() => { - // we've connected to all other peers - if sentry_node.peer_manager.total_peers_connected() > max_peers_allowed { - // if the `reserved_node` is not included, - // create and insert it, to be polled with rest of the nodes - if !all_nodes_ids - .iter() - .any(|local_peer_id| local_peer_id == &reserved_node_peer_id) { - if let Some(node) = reserved_node { - all_nodes_ids.push(node.local_peer_id); - spawn(&stop_sender, node); - reserved_node = None; - } - } - } - if let Some(FuelP2PEvent::PeerConnected(peer_id)) = sentry_node_event { - // we connected to the desired reserved node - if peer_id == reserved_node_peer_id { - break - } - } - }, - } - } - stop_sender.send(()).unwrap(); -} - -#[tokio::test] -#[instrument] -async fn dont_connect_to_node_with_same_peer_id() { - let mut p2p_config = - Config::default_initialized("dont_connect_to_node_with_same_peer_id"); - let mut node_a = build_service_from_config(p2p_config.clone()).await; - // We don't use build_service_from_config here, because we want to use the same keypair - // to have the same PeerId - let node_b = { - // Given - p2p_config.reserved_nodes = node_a.multiaddrs(); - let max_block_size = p2p_config.max_block_size; - let (sender, _) = - broadcast::channel(p2p_config.reserved_nodes.len().saturating_add(1)); - - let mut service = FuelP2PService::new( - sender, - p2p_config, - GossipsubMessageHandler::new(), - RequestResponseMessageHandler::new(max_block_size), - ) - .await - .unwrap(); - service.start().await.unwrap(); - service - }; - // When - tokio::time::timeout(Duration::from_secs(5), async move { - loop { - let event = node_a.next_event().await; - if let Some(FuelP2PEvent::PeerConnected(_)) = event { - panic!("Node B should not connect to 
Node A because they have the same PeerId"); - } - assert_eq!(node_a.peer_manager().total_peers_connected(), 0); - } - }) - .await - // Then - .expect_err("The node should not connect to itself"); - assert_eq!(node_b.peer_manager().total_peers_connected(), 0); -} - -// We start with two nodes, node_a and node_b, bootstrapped with `bootstrap_nodes_count` other nodes. -// Yet node_a and node_b are only allowed to connect to specified amount of nodes. -#[tokio::test] -#[instrument] -async fn max_peers_connected_works() { - let p2p_config = Config::default_initialized("max_peers_connected_works"); - - let bootstrap_nodes_count = 20; - let node_a_max_peers_allowed: usize = 3; - let node_b_max_peers_allowed: usize = 5; - - let (mut nodes, nodes_multiaddrs) = - setup_bootstrap_nodes(&p2p_config, bootstrap_nodes_count).await; - - // this node is allowed to only connect to `node_a_max_peers_allowed` other nodes - let mut node_a = { - let mut p2p_config = p2p_config.clone(); - p2p_config.max_discovery_peers_connected = node_a_max_peers_allowed as u32; - // it still tries to dial all nodes! - p2p_config.bootstrap_nodes.clone_from(&nodes_multiaddrs); - - build_service_from_config(p2p_config).await - }; - - // this node is allowed to only connect to `node_b_max_peers_allowed` other nodes - let mut node_b = { - let mut p2p_config = p2p_config.clone(); - p2p_config.max_discovery_peers_connected = node_b_max_peers_allowed as u32; - // it still tries to dial all nodes! 
- p2p_config.bootstrap_nodes.clone_from(&nodes_multiaddrs); - - build_service_from_config(p2p_config).await - }; - - let (tx, mut rx) = tokio::sync::oneshot::channel::<()>(); - let jh = tokio::spawn(async move { - while rx.try_recv().is_err() { - futures::stream::iter(nodes.iter_mut()) - .for_each_concurrent(4, |node| async move { - node.next_event().await; - }) - .await; - } - }); - - let mut node_a_hit_limit = false; - let mut node_b_hit_limit = false; - let mut instance = tokio::time::Instant::now(); - - // After we hit limit for node_a and node_b start timer. - // If we don't exceed the limit during 5 seconds, finish the test successfully. - while instance.elapsed().as_secs() < 5 { - tokio::select! { - event_from_node_a = node_a.next_event() => { - if let Some(FuelP2PEvent::PeerConnected(_)) = event_from_node_a { - if node_a.peer_manager().total_peers_connected() > node_a_max_peers_allowed { - panic!("The node should only connect to max {node_a_max_peers_allowed} peers"); - } - node_a_hit_limit |= node_a.peer_manager().total_peers_connected() == node_a_max_peers_allowed; - } - tracing::info!("Event from the node_a: {:?}", event_from_node_a); - }, - event_from_node_b = node_b.next_event() => { - if let Some(FuelP2PEvent::PeerConnected(_)) = event_from_node_b { - if node_b.peer_manager().total_peers_connected() > node_b_max_peers_allowed { - panic!("The node should only connect to max {node_b_max_peers_allowed} peers"); - } - node_b_hit_limit |= node_b.peer_manager().total_peers_connected() == node_b_max_peers_allowed; - } - tracing::info!("Event from the node_b: {:?}", event_from_node_b); - }, - } - - if !(node_a_hit_limit && node_b_hit_limit) { - instance = tokio::time::Instant::now(); - } - } - - tx.send(()).unwrap(); - jh.await.unwrap() -} - -// Simulate 2 Sets of Sentry nodes. -// In both Sets, a single Guarded Node should only be connected to their sentry nodes. -// While other nodes can and should connect to nodes outside of the Sentry Set. 
-#[tokio::test(flavor = "multi_thread")] -#[instrument] -async fn sentry_nodes_working() { - const RESERVED_NODE_SIZE: usize = 4; - - let mut p2p_config = Config::default_initialized("sentry_nodes_working"); - - async fn build_sentry_nodes(p2p_config: Config) -> (P2PService, Vec) { - let (reserved_nodes, reserved_multiaddrs) = - setup_bootstrap_nodes(&p2p_config, RESERVED_NODE_SIZE).await; - - // set up the guraded node service with `reserved_nodes_only_mode` - let guarded_node_service = { - let mut p2p_config = p2p_config.clone(); - p2p_config.reserved_nodes = reserved_multiaddrs; - p2p_config.reserved_nodes_only_mode = true; - build_service_from_config(p2p_config).await - }; - - let sentry_nodes = reserved_nodes; - - (guarded_node_service, sentry_nodes) - } - - let (mut first_guarded_node, mut first_sentry_nodes) = - build_sentry_nodes(p2p_config.clone()).await; - p2p_config.bootstrap_nodes = first_sentry_nodes - .iter() - .flat_map(|n| n.multiaddrs()) - .collect(); - - let (mut second_guarded_node, second_sentry_nodes) = - build_sentry_nodes(p2p_config).await; - - let first_sentry_set: HashSet<_> = first_sentry_nodes - .iter() - .map(|node| node.local_peer_id) - .collect(); - - let second_sentry_set: HashSet<_> = second_sentry_nodes - .iter() - .map(|node| node.local_peer_id) - .collect(); - - let mut single_sentry_node = first_sentry_nodes.pop().unwrap(); - let mut sentry_node_connections = HashSet::new(); - let (stop_sender, _) = watch::channel(()); - first_sentry_nodes - .into_iter() - .chain(second_sentry_nodes.into_iter()) - .for_each(|node| { - spawn(&stop_sender, node); - }); - - let mut instance = tokio::time::Instant::now(); - // After guards are connected to all sentries and at least one sentry has - // more connections than sentries in the group, start the timer.. - // If guards don't connected to new nodes during 5 seconds, finish the test successfully. - while instance.elapsed().as_secs() < 5 { - tokio::select! 
{ - event_from_first_guarded = first_guarded_node.next_event() => { - if let Some(FuelP2PEvent::PeerConnected(peer_id)) = event_from_first_guarded { - if !first_sentry_set.contains(&peer_id) { - panic!("The node should only connect to the specified reserved nodes!"); - } - } - tracing::info!("Event from the first guarded node: {:?}", event_from_first_guarded); - }, - event_from_second_guarded = second_guarded_node.next_event() => { - if let Some(FuelP2PEvent::PeerConnected(peer_id)) = event_from_second_guarded { - if !second_sentry_set.contains(&peer_id) { - panic!("The node should only connect to the specified reserved nodes!"); - } - } - tracing::info!("Event from the second guarded node: {:?}", event_from_second_guarded); - }, - // Poll one of the reserved, sentry nodes - sentry_node_event = single_sentry_node.next_event() => { - if let Some(FuelP2PEvent::PeerConnected(peer_id)) = sentry_node_event { - sentry_node_connections.insert(peer_id); - } - } - }; - - // This reserved node has connected to more than the number of reserved nodes it is part of. - // It means it has discovered other nodes in the network. - if sentry_node_connections.len() < 2 * RESERVED_NODE_SIZE { - instance = tokio::time::Instant::now(); - } - } - stop_sender.send(()).unwrap(); -} - -// Simulates 2 p2p nodes that are on the same network and should connect via mDNS -// without any additional bootstrapping -#[tokio::test] -#[instrument] -async fn nodes_connected_via_mdns() { - // Node A - let mut p2p_config = Config::default_initialized("nodes_connected_via_mdns"); - p2p_config.enable_mdns = true; - let mut node_a = build_service_from_config(p2p_config.clone()).await; - - // Node B - let mut node_b = build_service_from_config(p2p_config).await; - - loop { - tokio::select! 
{ - node_b_event = node_b.next_event() => { - if let Some(FuelP2PEvent::PeerConnected(_)) = node_b_event { - // successfully connected to Node A - break - } - tracing::info!("Node B Event: {:?}", node_b_event); - }, - _ = node_a.swarm.select_next_some() => {}, - }; - } -} - -// Simulates 2 p2p nodes that are on the same network but their Fuel Upgrade checksum is different -// (different chain id or chain config) -// So they are not able to connect -#[tokio::test] -#[instrument] -async fn nodes_cannot_connect_due_to_different_checksum() { - use libp2p::TransportError; - // Node A - let mut p2p_config = - Config::default_initialized("nodes_cannot_connect_due_to_different_checksum"); - let mut node_a = build_service_from_config(p2p_config.clone()).await; - - // different checksum - p2p_config.checksum = [1u8; 32].into(); - p2p_config.bootstrap_nodes = node_a.multiaddrs(); - // Node B - let mut node_b = build_service_from_config(p2p_config).await; - - loop { - tokio::select! { - node_a_event = node_a.swarm.select_next_some() => { - tracing::info!("Node A Event: {:?}", node_a_event); - if let SwarmEvent::IncomingConnectionError { error: ListenError::Transport(TransportError::Other(_)), .. 
} = node_a_event { - break - } - }, - node_b_event = node_b.next_event() => { - if let Some(FuelP2PEvent::PeerConnected(_)) = node_b_event { - panic!("Node B should not connect to Node A!") - } - tracing::info!("Node B Event: {:?}", node_b_event); - }, - - }; - } -} - -// Simulates 3 p2p nodes, Node B & Node C are bootstrapped with Node A -// Using Identify Protocol Node C should be able to identify and connect to Node B -#[tokio::test] -#[instrument] -async fn nodes_connected_via_identify() { - // Node A - let mut p2p_config = Config::default_initialized("nodes_connected_via_identify"); - - let mut node_a = build_service_from_config(p2p_config.clone()).await; - - // Node B - p2p_config.bootstrap_nodes = node_a.multiaddrs(); - let mut node_b = build_service_from_config(p2p_config.clone()).await; - - // Node C - let mut node_c = build_service_from_config(p2p_config).await; - - loop { - tokio::select! { - node_a_event = node_a.next_event() => { - tracing::info!("Node A Event: {:?}", node_a_event); - }, - node_b_event = node_b.next_event() => { - tracing::info!("Node B Event: {:?}", node_b_event); - }, - - node_c_event = node_c.next_event() => { - if let Some(FuelP2PEvent::PeerConnected(peer_id)) = node_c_event { - // we have connected to Node B! 
- if peer_id == node_b.local_peer_id { - break - } - } - - tracing::info!("Node C Event: {:?}", node_c_event); - } - }; - } -} - -// Simulates 2 p2p nodes that connect to each other and consequently exchange Peer Info -// On successful connection, node B updates its latest BlockHeight -// and shares it with Peer A via Heartbeat protocol -#[tokio::test] -#[instrument] -async fn peer_info_updates_work() { - let mut p2p_config = Config::default_initialized("peer_info_updates_work"); - - // Node A - let mut node_a = build_service_from_config(p2p_config.clone()).await; - - // Node B - p2p_config.bootstrap_nodes = node_a.multiaddrs(); - let mut node_b = build_service_from_config(p2p_config).await; - - let latest_block_height = 40_u32.into(); - - loop { - tokio::select! { - node_a_event = node_a.next_event() => { - if let Some(FuelP2PEvent::PeerInfoUpdated { peer_id, block_height: _ }) = node_a_event { - if let Some(PeerInfo { heartbeat_data, client_version, .. }) = node_a.peer_manager.get_peer_info(&peer_id) { - // Exits after it verifies that: - // 1. Peer Addresses are known - // 2. Client Version is known - // 3. 
Node has responded with their latest BlockHeight - if client_version.is_some() && heartbeat_data.block_height == Some(latest_block_height) { - break; - } - } - } - - tracing::info!("Node A Event: {:?}", node_a_event); - }, - node_b_event = node_b.next_event() => { - if let Some(FuelP2PEvent::PeerConnected(_)) = node_b_event { - // we've connected to Peer A - // let's update our BlockHeight - node_b.update_block_height(latest_block_height); - } - - tracing::info!("Node B Event: {:?}", node_b_event); - } - } - } -} - -#[tokio::test] -#[instrument] -async fn gossipsub_broadcast_tx_with_accept__new_tx() { - for _ in 0..100 { - tokio::time::timeout( - Duration::from_secs(5), - gossipsub_broadcast( - GossipsubBroadcastRequest::NewTx( - Arc::new(Transaction::default_test_tx()), - ), - GossipsubMessageAcceptance::Accept, - None, - ), - ) - .await - .unwrap(); - } -} - -#[tokio::test] -#[instrument] -async fn gossipsub_broadcast_tx_with_accept__tx_confirmations() { - for _ in 0..100 { - tokio::time::timeout( - Duration::from_secs(20), - gossipsub_broadcast( - GossipsubBroadcastRequest::TxPreConfirmations(Arc::new( - PreConfirmationMessage::default_test_confirmation(), - )), - GossipsubMessageAcceptance::Accept, - None, - ), - ) - .await - .unwrap(); - } -} - -#[tokio::test] -#[instrument] -async fn gossipsub_broadcast_tx_with_reject__new_tx() { - for _ in 0..100 { - tokio::time::timeout( - Duration::from_secs(5), - gossipsub_broadcast( - GossipsubBroadcastRequest::NewTx( - Arc::new(Transaction::default_test_tx()), - ), - GossipsubMessageAcceptance::Reject, - None, - ), - ) - .await - .unwrap(); - } -} - -#[tokio::test] -#[instrument] -async fn gossipsub_broadcast_tx_with_reject__tx_confirmations() { - for _ in 0..100 { - tokio::time::timeout( - Duration::from_secs(5), - gossipsub_broadcast( - GossipsubBroadcastRequest::TxPreConfirmations(Arc::new( - PreConfirmationMessage::default_test_confirmation(), - )), - GossipsubMessageAcceptance::Reject, - None, - ), - ) - .await - 
.unwrap(); - } -} - -#[tokio::test] -#[instrument] -#[ignore] -async fn gossipsub_scoring_with_accepted_messages() { - gossipsub_scoring_tester( - "gossipsub_scoring_with_accepted_messages", - 100, - GossipsubMessageAcceptance::Accept, - ) - .await; -} - -/// At `GRAYLIST_THRESHOLD` the node will ignore all messages from the peer -/// And our PeerManager will ban the peer at that point - leading to disconnect -#[tokio::test] -#[instrument] -#[ignore] -async fn gossipsub_scoring_with_rejected_messages() { - gossipsub_scoring_tester( - "gossipsub_scoring_with_rejected_messages", - 100, - GossipsubMessageAcceptance::Reject, - ) - .await; -} - -// TODO: Move me before tests that use this function -/// Helper function for testing gossipsub scoring -/// ! Dev Note: this function runs forever, its purpose is to show the scoring in action with passage of time -async fn gossipsub_scoring_tester( - test_name: &str, - amount_of_msgs_per_second: usize, - acceptance: GossipsubMessageAcceptance, -) { - let mut p2p_config = Config::default_initialized(test_name); - - // Node A - let mut node_a = build_service_from_config(p2p_config.clone()).await; - - // Node B - p2p_config.bootstrap_nodes = node_a.multiaddrs(); - let mut node_b = build_service_from_config(p2p_config.clone()).await; - - // Node C - p2p_config.bootstrap_nodes = node_b.multiaddrs(); - let mut node_c = build_service_from_config(p2p_config.clone()).await; - - let mut interval = tokio::time::interval(Duration::from_secs(1)); - - loop { - tokio::select! { - node_a_event = node_a.next_event() => { - if let Some(FuelP2PEvent::GossipsubMessage { message_id, peer_id, .. }) = node_a_event { - let msg_acceptance = to_message_acceptance(&acceptance); - node_a.report_message_validation_result(&message_id, peer_id, msg_acceptance); - } - } - node_b_event = node_b.next_event() => { - if let Some(FuelP2PEvent::GossipsubMessage { message_id, peer_id, .. 
}) = node_b_event { - let msg_acceptance = to_message_acceptance(&acceptance); - node_b.report_message_validation_result(&message_id, peer_id, msg_acceptance); - } - }, - node_c_event = node_c.next_event() => { - if let Some(FuelP2PEvent::GossipsubMessage { message_id, peer_id, .. }) = node_c_event { - let msg_acceptance = to_message_acceptance(&acceptance); - node_c.report_message_validation_result(&message_id, peer_id, msg_acceptance); - } - }, - _ = interval.tick() => { - let mut transactions = vec![]; - for _ in 0..amount_of_msgs_per_second { - let random_tx = - TransactionBuilder::script(rand::thread_rng().gen::<[u8; 32]>().to_vec(), rand::thread_rng().gen::<[u8; 32]>().to_vec()).finalize_as_transaction(); - - transactions.push(random_tx.clone()); - let random_tx = GossipsubBroadcastRequest::NewTx(Arc::new(random_tx)); - - match rand::thread_rng().gen_range(1..=3) { - 1 => { - // Node A sends a Transaction - let _ = node_a.publish_message(random_tx); - - }, - 2 => { - // Node B sends a Transaction - let _ = node_b.publish_message(random_tx); - - }, - 3 => { - // Node C sends a Transaction - let _ = node_c.publish_message(random_tx); - }, - _ => unreachable!("Random number generator is broken") - } - } - - eprintln!("Node A WORLD VIEW"); - eprintln!("B score: {:?}", node_a.get_peer_score(&node_b.local_peer_id).unwrap()); - eprintln!("C score: {:?}", node_a.get_peer_score(&node_c.local_peer_id).unwrap()); - eprintln!(); - - eprintln!("Node B WORLD VIEW"); - eprintln!("A score: {:?}", node_b.get_peer_score(&node_a.local_peer_id).unwrap()); - eprintln!("C score: {:?}", node_b.get_peer_score(&node_c.local_peer_id).unwrap()); - eprintln!(); - - eprintln!("Node C WORLD VIEW"); - eprintln!("A score: {:?}", node_c.get_peer_score(&node_a.local_peer_id).unwrap()); - eprintln!("B score: {:?}", node_c.get_peer_score(&node_b.local_peer_id).unwrap()); - eprintln!(); - - // never ending loop - // break; - } - } - } -} - -// TODO: Move me before tests that use this function 
-/// Reusable helper function for Broadcasting Gossipsub requests -async fn gossipsub_broadcast( - broadcast_request: GossipsubBroadcastRequest, - acceptance: GossipsubMessageAcceptance, - connection_limit: Option, -) { - let mut p2p_config = Config::default_initialized("gossipsub_exchanges_messages"); - - if let Some(connection_limit) = connection_limit { - p2p_config.max_gossipsub_peers_connected = connection_limit; - } - - p2p_config.subscribe_to_pre_confirmations = true; - - let (selected_topic, selected_tag): (Sha256Topic, GossipTopicTag) = { - let (topic, tag) = match broadcast_request { - GossipsubBroadcastRequest::NewTx(_) => { - (NEW_TX_GOSSIP_TOPIC, GossipTopicTag::NewTx) - } - GossipsubBroadcastRequest::TxPreConfirmations(_) => ( - TX_PRECONFIRMATIONS_GOSSIP_TOPIC, - GossipTopicTag::TxPreConfirmations, - ), - }; - - ( - Topic::new(format!("{}/{}", topic, p2p_config.network_name)), - tag, - ) - }; - tracing::info!("Selected Topic: {:?}", selected_topic); - - let mut message_sent = false; - - // Node A - let mut node_a = build_service_from_config(p2p_config.clone()).await; - - // Node B - p2p_config.bootstrap_nodes = node_a.multiaddrs(); - let mut node_b = build_service_from_config(p2p_config.clone()).await; - - // Node C - p2p_config.bootstrap_nodes = node_b.multiaddrs(); - let mut node_c = build_service_from_config(p2p_config.clone()).await; - - // Node C does not connect to Node A - // it should receive the propagated message from Node B if `GossipsubMessageAcceptance` is `Accept` - node_c - .swarm - .behaviour_mut() - .block_peer(node_a.local_peer_id); - - let mut a_connected_to_b = false; - let mut b_connected_to_c = false; - loop { - // verifies that we've got at least a single peer address to send message to - if a_connected_to_b && b_connected_to_c && !message_sent { - message_sent = true; - let broadcast_request = broadcast_request.clone(); - node_a.publish_message(broadcast_request).unwrap(); - } - - tokio::select! 
{ - node_a_event = node_a.next_event() => { - if let Some(FuelP2PEvent::NewSubscription { peer_id, tag }) = &node_a_event { - if tag != &selected_tag { - tracing::info!("Wrong tag, expected: {:?}, actual: {:?}", selected_tag, tag); - } else if peer_id == &node_b.local_peer_id { - a_connected_to_b = true; - } - } - tracing::info!("Node A Event: {:?}", node_a_event); - }, - node_b_event = node_b.next_event() => { - if let Some(FuelP2PEvent::NewSubscription { peer_id,tag, }) = &node_b_event { - tracing::info!("New subscription for peer_id: {:?} with tag: {:?}", peer_id, tag); - if tag != &selected_tag { - tracing::info!("Wrong tag, expected: {:?}, actual: {:?}", selected_tag, tag); - } else if peer_id == &node_c.local_peer_id { - b_connected_to_c = true; - } - } - - if let Some(FuelP2PEvent::GossipsubMessage { topic_hash, message, message_id, peer_id }) = node_b_event.clone() { - // Message Validation must be reported - // If it's `Accept`, Node B will propagate the message to Node C - // If it's `Ignore` or `Reject`, Node C should not receive anything - let msg_acceptance = to_message_acceptance(&acceptance); - node_b.report_message_validation_result(&message_id, peer_id, msg_acceptance); - if topic_hash != selected_topic.hash() { - tracing::error!("Wrong topic hash, expected: {} - actual: {}", selected_topic.hash(), topic_hash); - panic!("Wrong Topic"); - } - - check_message_matches_request(&message, &broadcast_request); - - // Node B received the correct message - // If we try to publish it again we will get `PublishError::Duplicate` - // This asserts that our MessageId calculation is consistent irrespective of which Peer sends it - let broadcast_request = broadcast_request.clone(); - matches!(node_b.publish_message(broadcast_request), Err(PublishError::Duplicate)); - - match acceptance { - GossipsubMessageAcceptance::Reject | GossipsubMessageAcceptance::Ignore => { - break - }, - _ => { - // the `exit` should happen in Node C - } - } - } - - tracing::info!("Node B 
Event: {:?}", node_b_event); - } - - node_c_event = node_c.next_event() => { - if let Some(FuelP2PEvent::GossipsubMessage { peer_id, .. }) = node_c_event.clone() { - // Node B should be the source propagator - assert!(peer_id == node_b.local_peer_id); - match acceptance { - GossipsubMessageAcceptance::Reject | GossipsubMessageAcceptance::Ignore => { - panic!("Node C should not receive Rejected or Ignored messages") - }, - GossipsubMessageAcceptance::Accept => { - break - } - } - } - } - }; - } -} - -fn check_message_matches_request( - message: &GossipsubMessage, - expected: &GossipsubBroadcastRequest, -) { - match (message, expected) { - (GossipsubMessage::NewTx(received), GossipsubBroadcastRequest::NewTx(requested)) => { - assert_eq!(requested.deref(), received, "Both messages were `NewTx`s, but the received message did not match the requested message"); - } - ( - GossipsubMessage::TxPreConfirmations(received), - GossipsubBroadcastRequest::TxPreConfirmations(requested), - ) => assert_eq!(requested.deref(), received, "Both messages were `Confirmations`, but the received message did not match the requested message"), - _ => panic!("Message does not match the expected request, expected: {:?}, actual: {:?}", expected, message), - } -} - -fn arbitrary_headers_for_range(range: Range) -> Vec { - let mut blocks = Vec::new(); - for i in range { - let mut header: BlockHeader = Default::default(); - header.set_block_height(i.into()); - - let sealed_block = SealedBlockHeader { - entity: header, - consensus: Consensus::PoA(PoAConsensus::new(Default::default())), - }; - blocks.push(sealed_block); - } - blocks -} - -// Metadata gets skipped during serialization, so this is the fuzzy way to compare blocks -fn eq_except_metadata(a: &SealedBlockHeader, b: &SealedBlockHeader) -> bool { - let app_eq = match (&a.entity, &b.entity) { - (BlockHeader::V1(a), BlockHeader::V1(b)) => a.application() == b.application(), - #[cfg(feature = "fault-proving")] - (BlockHeader::V2(a), 
BlockHeader::V2(b)) => a.application() == b.application(), - #[cfg_attr(not(feature = "fault-proving"), allow(unreachable_patterns))] - _ => false, - }; - app_eq && a.entity.consensus() == b.entity.consensus() -} - -async fn request_response_works_with( - request_msg: RequestMessage, - connection_limit: Option, -) { - let mut p2p_config = Config::default_initialized("request_response_works_with"); - - if let Some(connection_limit) = connection_limit { - p2p_config.max_request_response_peers_connected = connection_limit; - } - - // Node A - let mut node_a = build_service_from_config(p2p_config.clone()).await; - - // Node B - p2p_config.bootstrap_nodes = node_a.multiaddrs(); - let mut node_b = build_service_from_config(p2p_config.clone()).await; - - let (tx_test_end, mut rx_test_end) = mpsc::channel::(1); - - let mut request_sent = false; - - loop { - tokio::select! { - message_sent = rx_test_end.recv() => { - // we received a signal to end the test - assert!(message_sent.unwrap(), "Received incorrect or missing message"); - break; - } - node_a_event = node_a.next_event() => { - if let Some(FuelP2PEvent::PeerInfoUpdated { peer_id, block_height: _ }) = node_a_event { - if node_a.peer_manager.get_peer_info(&peer_id).is_some() { - // 0. 
verifies that we've got at least a single peer address to request message from - if !request_sent { - request_sent = true; - - match request_msg.clone() { - RequestMessage::SealedHeaders(range) => { - let (tx_orchestrator, rx_orchestrator) = oneshot::channel(); - assert!(node_a.send_request_msg(None, request_msg.clone(), ResponseSender::SealedHeaders(tx_orchestrator)).is_ok()); - let tx_test_end = tx_test_end.clone(); - - tokio::spawn(async move { - let response_message = rx_orchestrator.await; - - let expected = arbitrary_headers_for_range(range.clone()); - - if let Ok(response) = response_message { - match response { - Ok((_, Ok(Ok(sealed_headers)))) => { - let check = expected.iter().zip(sealed_headers.iter()).all(|(a, b)| eq_except_metadata(a, b)); - let _ = tx_test_end.send(check).await; - }, - Ok((_, Ok(Err(e)))) => { - tracing::error!("Node A did not return any headers: {:?}", e); - let _ = tx_test_end.send(false).await; - }, - Ok((_, Err(e))) => { - tracing::error!("Error in P2P communication: {:?}", e); - let _ = tx_test_end.send(false).await; - }, - Err(e) => { - tracing::error!("Error in P2P before sending message: {:?}", e); - let _ = tx_test_end.send(false).await; - }, - } - } else { - tracing::error!("Orchestrator failed to receive a message: {:?}", response_message); - let _ = tx_test_end.send(false).await; - } - }); - } - RequestMessage::Transactions(_range) => { - let (tx_orchestrator, rx_orchestrator) = oneshot::channel(); - assert!(node_a.send_request_msg(None, request_msg.clone(), ResponseSender::Transactions(tx_orchestrator)).is_ok()); - let tx_test_end = tx_test_end.clone(); - - tokio::spawn(async move { - let response_message = rx_orchestrator.await; - - if let Ok(response) = response_message { - match response { - Ok((_, Ok(Ok(transactions)))) => { - let check = transactions.len() == 1 && transactions[0].0.len() == 5; - let _ = tx_test_end.send(check).await; - }, - Ok((_, Ok(Err(e)))) => { - tracing::error!("Node A did not return any 
transactions: {:?}", e); - let _ = tx_test_end.send(false).await; - }, - Ok((_, Err(e))) => { - tracing::error!("Error in P2P communication: {:?}", e); - let _ = tx_test_end.send(false).await; - }, - Err(e) => { - tracing::error!("Error in P2P before sending message: {:?}", e); - let _ = tx_test_end.send(false).await; - }, - } - } else { - tracing::error!("Orchestrator failed to receive a message: {:?}", response_message); - let _ = tx_test_end.send(false).await; - } - }); - } - RequestMessage::TxPoolAllTransactionsIds => { - let (tx_orchestrator, rx_orchestrator) = oneshot::channel(); - assert!(node_a.send_request_msg(None, request_msg.clone(), ResponseSender::TxPoolAllTransactionsIds(tx_orchestrator)).is_ok()); - let tx_test_end = tx_test_end.clone(); - tokio::spawn(async move { - let response_message = rx_orchestrator.await; - - if let Ok((_, Ok(Ok(transaction_ids)))) = response_message { - let tx_ids: Vec = (0..5).map(|_| Transaction::default_test_tx().id(&ChainId::new(1))).collect(); - let check = transaction_ids.len() == 5 && transaction_ids.iter().zip(tx_ids.iter()).all(|(a, b)| a == b); - let _ = tx_test_end.send(check).await; - } else { - tracing::error!("Orchestrator failed to receive a message: {:?}", response_message); - let _ = tx_test_end.send(false).await; - } - }); - } - RequestMessage::TxPoolFullTransactions(tx_ids) => { - let (tx_orchestrator, rx_orchestrator) = oneshot::channel(); - assert!(node_a.send_request_msg(None, request_msg.clone(), ResponseSender::TxPoolFullTransactions(tx_orchestrator)).is_ok()); - let tx_test_end = tx_test_end.clone(); - tokio::spawn(async move { - let response_message = rx_orchestrator.await; - - if let Ok((_, Ok(Ok(transactions)))) = response_message { - let txs: Vec> = tx_ids.iter().enumerate().map(|(i, _)| { - if i == 0 { - None - } else { - Some(NetworkableTransactionPool::Transaction(Transaction::default_test_tx())) - } - }).collect(); - let check = transactions.len() == tx_ids.len() && 
transactions.iter().zip(txs.iter()).all(|(a, b)| a == b); - let _ = tx_test_end.send(check).await; - } else { - tracing::error!("Orchestrator failed to receive a message: {:?}", response_message); - let _ = tx_test_end.send(false).await; - } - }); - } - } - } - } - } - - tracing::info!("Node A Event: {:?}", node_a_event); - }, - node_b_event = node_b.next_event() => { - // 2. Node B receives the RequestMessage from Node A initiated by the NetworkOrchestrator - if let Some(FuelP2PEvent::InboundRequestMessage{ request_id, request_message: received_request_message }) = &node_b_event { - match received_request_message { - RequestMessage::SealedHeaders(range) => { - let sealed_headers: Vec<_> = arbitrary_headers_for_range(range.clone()); - - let _ = node_b.send_response_msg(*request_id, V2ResponseMessage::SealedHeaders(Ok(sealed_headers))); - } - RequestMessage::Transactions(_) => { - let txs = (0..5).map(|_| Transaction::default_test_tx()).collect(); - let transactions = vec![Transactions(txs)]; - let _ = node_b.send_response_msg(*request_id, V2ResponseMessage::Transactions(Ok(transactions))); - } - RequestMessage::TxPoolAllTransactionsIds => { - let tx_ids = (0..5).map(|_| Transaction::default_test_tx().id(&ChainId::new(1))).collect(); - let _ = node_b.send_response_msg(*request_id, V2ResponseMessage::TxPoolAllTransactionsIds(Ok(tx_ids))); - } - RequestMessage::TxPoolFullTransactions(tx_ids) => { - let txs = tx_ids.iter().enumerate().map(|(i, _)| { - if i == 0 { - None - } else { - Some(NetworkableTransactionPool::Transaction(Transaction::default_test_tx())) - } - }).collect(); - let _ = node_b.send_response_msg(*request_id, V2ResponseMessage::TxPoolFullTransactions(Ok(txs))); - } - } - } - - tracing::info!("Node B Event: {:?}", node_b_event); - } - }; - } -} - -#[tokio::test] -#[instrument] -async fn request_response_works_with_transactions() { - let arbitrary_range = 2..6; - request_response_works_with(RequestMessage::Transactions(arbitrary_range), None).await -} - 
-#[tokio::test] -#[instrument] -async fn request_response_works_with_sealed_headers_range_inclusive() { - let arbitrary_range = 2..6; - request_response_works_with(RequestMessage::SealedHeaders(arbitrary_range), None) - .await -} - -#[tokio::test] -#[instrument] -async fn request_response_works_with_transactions_ids() { - request_response_works_with(RequestMessage::TxPoolAllTransactionsIds, None).await -} - -#[tokio::test] -#[instrument] -async fn request_response_works_with_full_transactions() { - let tx_ids = (0..10) - .map(|_| Transaction::default_test_tx().id(&ChainId::new(1))) - .collect(); - request_response_works_with(RequestMessage::TxPoolFullTransactions(tx_ids), None) - .await -} - -/// We send a request for transactions, but it's responded by only headers -#[tokio::test] -#[instrument] -async fn invalid_response_type_is_detected() { - let mut p2p_config = Config::default_initialized("invalid_response_type_is_detected"); - - // Node A - let mut node_a = build_service_from_config(p2p_config.clone()).await; - - // Node B - p2p_config.bootstrap_nodes = node_a.multiaddrs(); - let mut node_b = build_service_from_config(p2p_config.clone()).await; - - let (tx_test_end, mut rx_test_end) = mpsc::channel::(1); - - let mut request_sent = false; - - loop { - tokio::select! { - message_sent = rx_test_end.recv() => { - // we received a signal to end the test - assert!(message_sent.unwrap(), "Received incorrect or missing message"); - break; - } - node_a_event = node_a.next_event() => { - if let Some(FuelP2PEvent::PeerInfoUpdated { peer_id, block_height: _ }) = node_a_event { - if node_a.peer_manager.get_peer_info(&peer_id).is_some() { - // 0. 
verifies that we've got at least a single peer address to request message from - if !request_sent { - request_sent = true; - - let (tx_orchestrator, rx_orchestrator) = oneshot::channel(); - assert!(node_a.send_request_msg(None, RequestMessage::Transactions(0..2), ResponseSender::Transactions(tx_orchestrator)).is_ok()); - let tx_test_end = tx_test_end.clone(); - - tokio::spawn(async move { - let response_message = rx_orchestrator.await; - - if let Ok(response) = response_message { - match response { - Ok((_, Ok(_))) => { - let _ = tx_test_end.send(false).await; - panic!("Request succeeded unexpectedly"); - }, - Ok((_, Err(ResponseError::TypeMismatch))) => { - // Got Invalid Response Type as expected, so end test - let _ = tx_test_end.send(true).await; - }, - Ok((_, Err(err))) => { - let _ = tx_test_end.send(false).await; - panic!("Unexpected error in P2P communication: {:?}", err); - }, - Err(e) => { - let _ = tx_test_end.send(false).await; - panic!("Error in P2P before sending message: {:?}", e); - }, - } - } else { - let _ = tx_test_end.send(false).await; - panic!("Orchestrator failed to receive a message: {:?}", response_message); - } - }); - } - } - } - - tracing::info!("Node A Event: {:?}", node_a_event); - }, - node_b_event = node_b.next_event() => { - // 2. 
Node B receives the RequestMessage from Node A initiated by the NetworkOrchestrator - if let Some(FuelP2PEvent::InboundRequestMessage{ request_id, request_message: _ }) = &node_b_event { - let sealed_headers: Vec<_> = arbitrary_headers_for_range(1..3); - let _ = node_b.send_response_msg(*request_id, V2ResponseMessage::SealedHeaders(Ok(sealed_headers))); - } - - tracing::info!("Node B Event: {:?}", node_b_event); - } - }; - } -} - -#[tokio::test] -#[instrument] -async fn req_res_outbound_timeout_works() { - let mut p2p_config = Config::default_initialized("req_res_outbound_timeout_works"); - - // Node A - // setup request timeout to 1ms in order for the Request to fail - p2p_config.set_request_timeout = Duration::from_millis(1); - - let mut node_a = build_service_from_config(p2p_config.clone()).await; - - // Node B - p2p_config.bootstrap_nodes = node_a.multiaddrs(); - p2p_config.set_request_timeout = Duration::from_secs(20); - let mut node_b = build_service_from_config(p2p_config.clone()).await; - - let (tx_test_end, mut rx_test_end) = tokio::sync::mpsc::channel(1); - - // track the request sent in order to avoid duplicate sending - let mut request_sent = false; - - loop { - tokio::select! { - node_a_event = node_a.next_event() => { - if let Some(FuelP2PEvent::PeerInfoUpdated { peer_id, block_height: _ }) = node_a_event { - if node_a.peer_manager.get_peer_info(&peer_id).is_some() { - // 0. verifies that we've got at least a single peer address to request message from - if !request_sent { - request_sent = true; - - // 1. Simulating Oneshot channel from the NetworkOrchestrator - let (tx_orchestrator, rx_orchestrator) = oneshot::channel(); - - // 2a. 
there should be ZERO pending outbound requests in the table - assert_eq!(node_a.outbound_requests_table.len(), 0); - - // Request successfully sent - let requested_block_height = RequestMessage::SealedHeaders(0..0); - assert!(node_a.send_request_msg(None, requested_block_height, ResponseSender::SealedHeaders(tx_orchestrator)).is_ok()); - - // 2b. there should be ONE pending outbound requests in the table - assert_eq!(node_a.outbound_requests_table.len(), 1); - - let tx_test_end = tx_test_end.clone(); - - tokio::spawn(async move { - // 3. Simulating NetworkOrchestrator receiving a Timeout Error Message! - let response_message = rx_orchestrator.await; - if let Ok(response) = response_message { - match response { - Ok((_, Ok(_))) => { - let _ = tx_test_end.send(false).await; - panic!("Request succeeded unexpectedly"); - }, - Ok((_, Err(ResponseError::P2P(_)))) => { - // Got Invalid Response Type as expected, so end test - let _ = tx_test_end.send(true).await; - }, - Ok((_, Err(err))) => { - let _ = tx_test_end.send(false).await; - panic!("Unexpected error in P2P communication: {:?}", err); - }, - Err(e) => { - let _ = tx_test_end.send(false).await; - panic!("Error in P2P before sending message: {:?}", e); - }, - } - } else { - let _ = tx_test_end.send(false).await; - panic!("Orchestrator failed to receive a message: {:?}", response_message); - } - }); - } - } - } - - tracing::info!("Node A Event: {:?}", node_a_event); - }, - recv = rx_test_end.recv() => { - assert_eq!(recv, Some(true), "Test failed"); - // we received a signal to end the test - // 4. 
there should be ZERO pending outbound requests in the table - // after the Outbound Request Failed with Timeout - assert_eq!(node_a.outbound_requests_table.len(), 0); - break; - }, - // will not receive the request at all - node_b_event = node_b.next_event() => { - tracing::info!("Node B Event: {:?}", node_b_event); - } - }; - } -} - -#[tokio::test] -async fn gossipsub_peer_limit_works() { - tokio::time::timeout( - Duration::from_secs(5), - gossipsub_broadcast( - GossipsubBroadcastRequest::NewTx(Arc::new( - Transaction::default_test_tx(), - )), - GossipsubMessageAcceptance::Accept, - Some(1) // limit to 1 peer, therefore the function will timeout, as it will not be able to propagate the message - ), - ) - .await.expect_err("Should have timed out"); -} - -#[tokio::test] -async fn request_response_peer_limit_works() { - let handle = tokio::spawn(async { - let arbitrary_range = 2..6; - - request_response_works_with( - RequestMessage::Transactions(arbitrary_range), - Some(0), // limit to 0 peers, - ) - .await; - }); - - let result = handle.await; - assert!(result.is_err()); -} diff --git a/crates/services/p2p/src/service.rs b/crates/services/p2p/src/service.rs index 1ae76a20967..ad9c806fc0c 100644 --- a/crates/services/p2p/src/service.rs +++ b/crates/services/p2p/src/service.rs @@ -109,12 +109,6 @@ use tokio::{ }; use tracing::warn; -#[cfg(test)] -pub mod task_tests; - -#[cfg(test)] -pub mod broadcast_tests; - const CHANNEL_SIZE: usize = 1024 * 10; pub type Service = ServiceRunner>; @@ -1454,3 +1448,623 @@ fn report_message( warn!(target: "fuel-p2p", "Failed to read PeerId from received GossipsubMessageId: {}", msg_id); } } +#[cfg(test)] +pub mod task_tests { + #![allow(non_snake_case)] + use crate::ports::P2pDb; + + use super::*; + + use crate::{ + gossipsub::topics::TX_PRECONFIRMATIONS_GOSSIP_TOPIC, + peer_manager::heartbeat_data::HeartbeatData, + }; + use fuel_core_services::{ + Service, + State, + }; + use fuel_core_storage::Result as StorageResult; + use 
fuel_core_types::{ + blockchain::consensus::Genesis, + fuel_types::BlockHeight, + services::p2p::PreConfirmationMessage, + }; + use futures::FutureExt; + use libp2p::gossipsub::TopicHash; + use std::{ + collections::VecDeque, + time::SystemTime, + }; + + #[derive(Clone, Debug)] + struct FakeDb; + + impl AtomicView for FakeDb { + type LatestView = Self; + + fn latest_view(&self) -> StorageResult { + Ok(self.clone()) + } + } + + impl P2pDb for FakeDb { + fn get_sealed_headers( + &self, + _block_height_range: Range, + ) -> StorageResult>> { + unimplemented!() + } + + fn get_transactions( + &self, + _block_height_range: Range, + ) -> StorageResult>> { + unimplemented!() + } + + fn get_genesis(&self) -> StorageResult { + Ok(Default::default()) + } + } + + #[derive(Clone, Debug)] + struct FakeBlockImporter; + + impl BlockHeightImporter for FakeBlockImporter { + fn next_block_height(&self) -> BoxStream { + Box::pin(fuel_core_services::stream::pending()) + } + } + + #[derive(Clone, Debug)] + struct FakeTxPool; + + impl TxPool for FakeTxPool { + async fn get_tx_ids( + &self, + _max_txs: usize, + ) -> anyhow::Result> { + Ok(vec![]) + } + + async fn get_full_txs( + &self, + tx_ids: Vec, + ) -> anyhow::Result>> { + Ok(tx_ids.iter().map(|_| None).collect()) + } + } + + #[tokio::test] + async fn start_and_stop_awaits_works() { + let p2p_config = Config::::default("start_stop_works"); + let (shared_state, request_receiver) = build_shared_state(p2p_config.clone()); + let service = new_service( + ChainId::default(), + 0.into(), + p2p_config, + shared_state, + request_receiver, + FakeDb, + FakeBlockImporter, + FakeTxPool, + ); + + // Node with p2p service started + assert!(service.start_and_await().await.unwrap().started()); + // Node with p2p service stopped + assert!(service.stop_and_await().await.unwrap().stopped()); + } + + struct FakeP2PService { + peer_info: Vec<(PeerId, PeerInfo)>, + next_event_stream: BoxStream, + } + + impl TaskP2PService for FakeP2PService { + fn 
update_metrics(&self, _: T) + where + T: FnOnce(), + { + unimplemented!() + } + + fn get_all_peer_info(&self) -> Vec<(&PeerId, &PeerInfo)> { + self.peer_info.iter().map(|tup| (&tup.0, &tup.1)).collect() + } + + fn get_peer_id_with_height(&self, _height: &BlockHeight) -> Option { + todo!() + } + + fn next_event(&mut self) -> BoxFuture<'_, Option> { + self.next_event_stream.next().boxed() + } + + fn publish_message( + &mut self, + _message: GossipsubBroadcastRequest, + ) -> anyhow::Result<()> { + todo!() + } + + fn send_request_msg( + &mut self, + _peer_id: Option, + _request_msg: RequestMessage, + _on_response: ResponseSender, + ) -> anyhow::Result<()> { + todo!() + } + + fn send_response_msg( + &mut self, + _request_id: InboundRequestId, + _message: V2ResponseMessage, + ) -> anyhow::Result<()> { + todo!() + } + + fn report_message( + &mut self, + _message: GossipsubMessageInfo, + _acceptance: GossipsubMessageAcceptance, + ) -> anyhow::Result<()> { + todo!() + } + + fn report_peer( + &mut self, + _peer_id: PeerId, + _score: AppScore, + _reporting_service: &str, + ) -> anyhow::Result<()> { + todo!() + } + + fn update_block_height(&mut self, _height: BlockHeight) -> anyhow::Result<()> { + Ok(()) + } + } + + #[derive(Clone)] + struct FakeDB; + + impl AtomicView for FakeDB { + type LatestView = Self; + + fn latest_view(&self) -> StorageResult { + Ok(self.clone()) + } + } + + impl P2pDb for FakeDB { + fn get_sealed_headers( + &self, + _block_height_range: Range, + ) -> StorageResult>> { + todo!() + } + + fn get_transactions( + &self, + _block_height_range: Range, + ) -> StorageResult>> { + todo!() + } + + fn get_genesis(&self) -> StorageResult { + todo!() + } + } + + struct FakeBroadcast { + pub peer_reports: mpsc::Sender<(FuelPeerId, AppScore, String)>, + pub confirmation_gossip_broadcast: mpsc::Sender, + } + + impl Broadcast for FakeBroadcast { + fn report_peer( + &self, + peer_id: FuelPeerId, + report: AppScore, + reporting_service: &'static str, + ) -> 
anyhow::Result<()> { + self.peer_reports.try_send(( + peer_id, + report, + reporting_service.to_string(), + ))?; + Ok(()) + } + + fn block_height_broadcast( + &self, + _block_height_data: BlockHeightHeartbeatData, + ) -> anyhow::Result<()> { + todo!() + } + + fn tx_broadcast( + &self, + _transaction: TransactionGossipData, + ) -> anyhow::Result<()> { + todo!() + } + + fn confirmations_broadcast( + &self, + confirmations: ConfirmationsGossipData, + ) -> anyhow::Result<()> { + self.confirmation_gossip_broadcast.try_send(confirmations)?; + Ok(()) + } + + fn new_tx_subscription_broadcast( + &self, + _peer_id: FuelPeerId, + ) -> anyhow::Result<()> { + todo!() + } + } + + #[tokio::test] + async fn peer_heartbeat_reputation_checks__slow_heartbeat_sends_reports() { + // given + let peer_id = PeerId::random(); + // more than limit + let last_duration = Duration::from_secs(30); + let mut durations = VecDeque::new(); + durations.push_front(last_duration); + + let heartbeat_data = HeartbeatData { + block_height: None, + last_heartbeat: Instant::now(), + last_heartbeat_sys: SystemTime::now(), + window: 0, + durations, + }; + let peer_info = PeerInfo { + peer_addresses: Default::default(), + client_version: None, + heartbeat_data, + score: 100.0, + }; + let peer_info = vec![(peer_id, peer_info)]; + let p2p_service = FakeP2PService { + peer_info, + next_event_stream: Box::pin(futures::stream::pending()), + }; + let (request_sender, request_receiver) = mpsc::channel(100); + + let (report_sender, mut report_receiver) = mpsc::channel(100); + let broadcast = FakeBroadcast { + peer_reports: report_sender, + confirmation_gossip_broadcast: mpsc::channel(100).0, + }; + + // Less than actual + let heartbeat_max_avg_interval = Duration::from_secs(20); + // Greater than actual + let heartbeat_max_time_since_last = Duration::from_secs(40); + + // Arbitrary values + let heartbeat_peer_reputation_config = HeartbeatPeerReputationConfig { + old_heartbeat_penalty: 5.6, + 
low_heartbeat_frequency_penalty: 20.45, + }; + + let mut task = Task { + chain_id: Default::default(), + response_timeout: Default::default(), + p2p_service, + view_provider: FakeDB, + next_block_height: FakeBlockImporter.next_block_height(), + tx_pool: FakeTxPool, + request_receiver, + request_sender, + db_heavy_task_processor: SyncProcessor::new("Test", 1, 1).unwrap(), + tx_pool_heavy_task_processor: AsyncProcessor::new("Test", 1, 1).unwrap(), + broadcast, + max_headers_per_request: 0, + max_txs_per_request: 100, + heartbeat_check_interval: Duration::from_secs(0), + heartbeat_max_avg_interval, + heartbeat_max_time_since_last, + next_check_time: Instant::now(), + heartbeat_peer_reputation_config: heartbeat_peer_reputation_config.clone(), + cached_view: Arc::new(CachedView::new(100, false)), + }; + let (watch_sender, watch_receiver) = tokio::sync::watch::channel(State::Started); + let mut watcher = StateWatcher::from(watch_receiver); + + // when + let (report_peer_id, report, reporting_service) = tokio::time::timeout( + Duration::from_secs(1), + wait_until_report_received(&mut report_receiver, &mut task, &mut watcher), + ) + .await + .unwrap(); + + // then + watch_sender.send(State::Stopped).unwrap(); + + assert_eq!( + FuelPeerId::from(peer_id.to_bytes().to_vec()), + report_peer_id + ); + assert_eq!( + report, + heartbeat_peer_reputation_config.low_heartbeat_frequency_penalty + ); + assert_eq!(reporting_service, "p2p"); + } + + #[tokio::test] + async fn peer_heartbeat_reputation_checks__old_heartbeat_sends_reports() { + // given + let peer_id = PeerId::random(); + // under the limit + let last_duration = Duration::from_secs(5); + let last_heartbeat = Instant::now() - Duration::from_secs(50); + let last_heartbeat_sys = SystemTime::now() - Duration::from_secs(50); + let mut durations = VecDeque::new(); + durations.push_front(last_duration); + + let heartbeat_data = HeartbeatData { + block_height: None, + last_heartbeat, + last_heartbeat_sys, + window: 0, + durations, 
+ }; + let peer_info = PeerInfo { + peer_addresses: Default::default(), + client_version: None, + heartbeat_data, + score: 100.0, + }; + let peer_info = vec![(peer_id, peer_info)]; + let p2p_service = FakeP2PService { + peer_info, + next_event_stream: Box::pin(futures::stream::pending()), + }; + let (request_sender, request_receiver) = mpsc::channel(100); + + let (report_sender, mut report_receiver) = mpsc::channel(100); + let broadcast = FakeBroadcast { + peer_reports: report_sender, + confirmation_gossip_broadcast: mpsc::channel(100).0, + }; + + // Greater than actual + let heartbeat_max_avg_interval = Duration::from_secs(20); + // Less than actual + let heartbeat_max_time_since_last = Duration::from_secs(40); + + // Arbitrary values + let heartbeat_peer_reputation_config = HeartbeatPeerReputationConfig { + old_heartbeat_penalty: 5.6, + low_heartbeat_frequency_penalty: 20.45, + }; + + let mut task = Task { + chain_id: Default::default(), + response_timeout: Default::default(), + p2p_service, + view_provider: FakeDB, + tx_pool: FakeTxPool, + next_block_height: FakeBlockImporter.next_block_height(), + request_receiver, + request_sender, + db_heavy_task_processor: SyncProcessor::new("Test", 1, 1).unwrap(), + tx_pool_heavy_task_processor: AsyncProcessor::new("Test", 1, 1).unwrap(), + broadcast, + max_headers_per_request: 0, + max_txs_per_request: 100, + heartbeat_check_interval: Duration::from_secs(0), + heartbeat_max_avg_interval, + heartbeat_max_time_since_last, + next_check_time: Instant::now(), + heartbeat_peer_reputation_config: heartbeat_peer_reputation_config.clone(), + cached_view: Arc::new(CachedView::new(100, false)), + }; + let (watch_sender, watch_receiver) = tokio::sync::watch::channel(State::Started); + let mut watcher = StateWatcher::from(watch_receiver); + + // when + // we run this in a loop to ensure that the task is run until it reports + let (report_peer_id, report, reporting_service) = tokio::time::timeout( + Duration::from_secs(1), + 
wait_until_report_received(&mut report_receiver, &mut task, &mut watcher), + ) + .await + .unwrap(); + + // then + watch_sender.send(State::Stopped).unwrap(); + + assert_eq!( + FuelPeerId::from(peer_id.to_bytes().to_vec()), + report_peer_id + ); + assert_eq!( + report, + heartbeat_peer_reputation_config.old_heartbeat_penalty + ); + assert_eq!(reporting_service, "p2p"); + } + + async fn wait_until_report_received( + report_receiver: &mut Receiver<(FuelPeerId, AppScore, String)>, + task: &mut Task, + watcher: &mut StateWatcher, + ) -> (FuelPeerId, AppScore, String) { + loop { + task.run(watcher).await; + if let Ok((peer_id, recv_report, service)) = report_receiver.try_recv() { + return (peer_id, recv_report, service); + } + } + } + + #[tokio::test] + async fn should_process_all_imported_block_under_infinite_events_from_p2p() { + // Given + let (blocks_processed_sender, mut block_processed_receiver) = mpsc::channel(1); + let next_block_height = Box::pin(futures::stream::repeat_with(move || { + blocks_processed_sender.try_send(()).unwrap(); + BlockHeight::from(0) + })); + let infinite_event_stream = Box::pin(futures::stream::empty()); + let p2p_service = FakeP2PService { + peer_info: vec![], + next_event_stream: infinite_event_stream, + }; + + // Initialization + let (request_sender, request_receiver) = mpsc::channel(100); + let broadcast = FakeBroadcast { + peer_reports: mpsc::channel(100).0, + confirmation_gossip_broadcast: mpsc::channel(100).0, + }; + let mut task = Task { + chain_id: Default::default(), + response_timeout: Default::default(), + p2p_service, + tx_pool: FakeTxPool, + view_provider: FakeDB, + next_block_height, + request_receiver, + request_sender, + db_heavy_task_processor: SyncProcessor::new("Test", 1, 1).unwrap(), + tx_pool_heavy_task_processor: AsyncProcessor::new("Test", 1, 1).unwrap(), + broadcast, + max_headers_per_request: 0, + max_txs_per_request: 100, + heartbeat_check_interval: Duration::from_secs(0), + heartbeat_max_avg_interval: 
Default::default(), + heartbeat_max_time_since_last: Default::default(), + next_check_time: Instant::now(), + heartbeat_peer_reputation_config: Default::default(), + cached_view: Arc::new(CachedView::new(100, false)), + }; + let mut watcher = StateWatcher::started(); + // End of initialization + + for _ in 0..100 { + // When + task.run(&mut watcher).await; + + // Then + block_processed_receiver + .try_recv() + .expect("Should process the block height even under p2p pressure"); + } + } + + fn arb_tx_confirmation_gossip_message() -> FuelP2PEvent { + let peer_id = PeerId::random(); + let message_id = vec![1, 2, 3, 4, 5].into(); + let topic_hash = TopicHash::from_raw(TX_PRECONFIRMATIONS_GOSSIP_TOPIC); + let confirmations = PreConfirmationMessage::default_test_confirmation(); + let message = GossipsubMessage::TxPreConfirmations(confirmations); + FuelP2PEvent::GossipsubMessage { + peer_id, + message_id, + topic_hash, + message, + } + } + + #[tokio::test] + async fn run__gossip_message_from_p2p_service_is_broadcasted__tx_confirmations() { + // given + let gossip_message_event = arb_tx_confirmation_gossip_message(); + let events = vec![gossip_message_event.clone()]; + let event_stream = futures::stream::iter(events); + let p2p_service = FakeP2PService { + peer_info: vec![], + next_event_stream: Box::pin(event_stream), + }; + let (confirmations_sender, mut confirmations_receiver) = mpsc::channel(100); + let broadcast = FakeBroadcast { + peer_reports: mpsc::channel(100).0, + confirmation_gossip_broadcast: confirmations_sender, + }; + let (request_sender, request_receiver) = mpsc::channel(100); + let mut task = Task { + chain_id: Default::default(), + response_timeout: Default::default(), + p2p_service, + view_provider: FakeDB, + next_block_height: FakeBlockImporter.next_block_height(), + tx_pool: FakeTxPool, + request_receiver, + request_sender, + db_heavy_task_processor: SyncProcessor::new("Test", 1, 1).unwrap(), + tx_pool_heavy_task_processor: AsyncProcessor::new("Test", 
1, 1).unwrap(), + broadcast, + max_headers_per_request: 0, + max_txs_per_request: 100, + heartbeat_check_interval: Duration::from_secs(0), + heartbeat_max_avg_interval: Default::default(), + heartbeat_max_time_since_last: Default::default(), + next_check_time: Instant::now(), + heartbeat_peer_reputation_config: Default::default(), + cached_view: Arc::new(CachedView::new(100, false)), + }; + + // when + let mut watcher = StateWatcher::started(); + task.run(&mut watcher).await; + tokio::time::sleep(Duration::from_millis(100)).await; + + // then + let actual = confirmations_receiver.try_recv().unwrap().data.unwrap(); + let FuelP2PEvent::GossipsubMessage { message, .. } = gossip_message_event else { + panic!("Expected GossipsubMessage event"); + }; + let GossipsubMessage::TxPreConfirmations(expected) = message else { + panic!("Expected Confirmations message"); + }; + assert_eq!(expected, actual); + } +} + +#[cfg(test)] +pub mod broadcast_tests { + + #![allow(non_snake_case)] + + use super::*; + use fuel_core_types::services::p2p::PreConfirmationMessage; + + fn arb_shared_state() -> SharedState { + let config = Config::default("test network"); + let (shared_state, _) = build_shared_state(config); + shared_state + } + + #[tokio::test] + async fn shared_state__broadcast__tx_confirmations() { + // given + let broadcast = arb_shared_state(); + let confirmations = PreConfirmationMessage::default_test_confirmation(); + let confirmations_gossip_data = ConfirmationsGossipData { + data: Some(confirmations.clone()), + peer_id: FuelPeerId::from(PeerId::random().to_bytes().to_vec()), + message_id: vec![1, 2, 3, 4], + }; + let mut confirmations_receiver = broadcast.subscribe_confirmations(); + + // when + broadcast + .confirmations_broadcast(confirmations_gossip_data) + .unwrap(); + + // then + let actual = confirmations_receiver.try_recv().unwrap().data.unwrap(); + assert_eq!(confirmations, actual); + } +} diff --git a/crates/services/p2p/src/service/broadcast_tests.rs 
b/crates/services/p2p/src/service/broadcast_tests.rs deleted file mode 100644 index beae827c918..00000000000 --- a/crates/services/p2p/src/service/broadcast_tests.rs +++ /dev/null @@ -1,32 +0,0 @@ -#![allow(non_snake_case)] - -use super::*; -use fuel_core_types::services::p2p::PreConfirmationMessage; - -fn arb_shared_state() -> SharedState { - let config = Config::default("test network"); - let (shared_state, _) = build_shared_state(config); - shared_state -} - -#[tokio::test] -async fn shared_state__broadcast__tx_confirmations() { - // given - let broadcast = arb_shared_state(); - let confirmations = PreConfirmationMessage::default_test_confirmation(); - let confirmations_gossip_data = ConfirmationsGossipData { - data: Some(confirmations.clone()), - peer_id: FuelPeerId::from(PeerId::random().to_bytes().to_vec()), - message_id: vec![1, 2, 3, 4], - }; - let mut confirmations_receiver = broadcast.subscribe_confirmations(); - - // when - broadcast - .confirmations_broadcast(confirmations_gossip_data) - .unwrap(); - - // then - let actual = confirmations_receiver.try_recv().unwrap().data.unwrap(); - assert_eq!(confirmations, actual); -} diff --git a/crates/services/p2p/src/service/task_tests.rs b/crates/services/p2p/src/service/task_tests.rs deleted file mode 100644 index 7c7b3e56a95..00000000000 --- a/crates/services/p2p/src/service/task_tests.rs +++ /dev/null @@ -1,571 +0,0 @@ -#![allow(non_snake_case)] -use crate::ports::P2pDb; - -use super::*; - -use crate::{ - gossipsub::topics::TX_PRECONFIRMATIONS_GOSSIP_TOPIC, - peer_manager::heartbeat_data::HeartbeatData, -}; -use fuel_core_services::{ - Service, - State, -}; -use fuel_core_storage::Result as StorageResult; -use fuel_core_types::{ - blockchain::consensus::Genesis, - fuel_types::BlockHeight, - services::p2p::PreConfirmationMessage, -}; -use futures::FutureExt; -use libp2p::gossipsub::TopicHash; -use std::{ - collections::VecDeque, - time::SystemTime, -}; - -#[derive(Clone, Debug)] -struct FakeDb; - -impl 
AtomicView for FakeDb { - type LatestView = Self; - - fn latest_view(&self) -> StorageResult { - Ok(self.clone()) - } -} - -impl P2pDb for FakeDb { - fn get_sealed_headers( - &self, - _block_height_range: Range, - ) -> StorageResult>> { - unimplemented!() - } - - fn get_transactions( - &self, - _block_height_range: Range, - ) -> StorageResult>> { - unimplemented!() - } - - fn get_genesis(&self) -> StorageResult { - Ok(Default::default()) - } -} - -#[derive(Clone, Debug)] -struct FakeBlockImporter; - -impl BlockHeightImporter for FakeBlockImporter { - fn next_block_height(&self) -> BoxStream { - Box::pin(fuel_core_services::stream::pending()) - } -} - -#[derive(Clone, Debug)] -struct FakeTxPool; - -impl TxPool for FakeTxPool { - async fn get_tx_ids( - &self, - _max_txs: usize, - ) -> anyhow::Result> { - Ok(vec![]) - } - - async fn get_full_txs( - &self, - tx_ids: Vec, - ) -> anyhow::Result>> { - Ok(tx_ids.iter().map(|_| None).collect()) - } -} - -#[tokio::test] -async fn start_and_stop_awaits_works() { - let p2p_config = Config::::default("start_stop_works"); - let (shared_state, request_receiver) = build_shared_state(p2p_config.clone()); - let service = new_service( - ChainId::default(), - 0.into(), - p2p_config, - shared_state, - request_receiver, - FakeDb, - FakeBlockImporter, - FakeTxPool, - ); - - // Node with p2p service started - assert!(service.start_and_await().await.unwrap().started()); - // Node with p2p service stopped - assert!(service.stop_and_await().await.unwrap().stopped()); -} - -struct FakeP2PService { - peer_info: Vec<(PeerId, PeerInfo)>, - next_event_stream: BoxStream, -} - -impl TaskP2PService for FakeP2PService { - fn update_metrics(&self, _: T) - where - T: FnOnce(), - { - unimplemented!() - } - - fn get_all_peer_info(&self) -> Vec<(&PeerId, &PeerInfo)> { - self.peer_info.iter().map(|tup| (&tup.0, &tup.1)).collect() - } - - fn get_peer_id_with_height(&self, _height: &BlockHeight) -> Option { - todo!() - } - - fn next_event(&mut self) -> 
BoxFuture<'_, Option> { - self.next_event_stream.next().boxed() - } - - fn publish_message( - &mut self, - _message: GossipsubBroadcastRequest, - ) -> anyhow::Result<()> { - todo!() - } - - fn send_request_msg( - &mut self, - _peer_id: Option, - _request_msg: RequestMessage, - _on_response: ResponseSender, - ) -> anyhow::Result<()> { - todo!() - } - - fn send_response_msg( - &mut self, - _request_id: InboundRequestId, - _message: V2ResponseMessage, - ) -> anyhow::Result<()> { - todo!() - } - - fn report_message( - &mut self, - _message: GossipsubMessageInfo, - _acceptance: GossipsubMessageAcceptance, - ) -> anyhow::Result<()> { - todo!() - } - - fn report_peer( - &mut self, - _peer_id: PeerId, - _score: AppScore, - _reporting_service: &str, - ) -> anyhow::Result<()> { - todo!() - } - - fn update_block_height(&mut self, _height: BlockHeight) -> anyhow::Result<()> { - Ok(()) - } -} - -#[derive(Clone)] -struct FakeDB; - -impl AtomicView for FakeDB { - type LatestView = Self; - - fn latest_view(&self) -> StorageResult { - Ok(self.clone()) - } -} - -impl P2pDb for FakeDB { - fn get_sealed_headers( - &self, - _block_height_range: Range, - ) -> StorageResult>> { - todo!() - } - - fn get_transactions( - &self, - _block_height_range: Range, - ) -> StorageResult>> { - todo!() - } - - fn get_genesis(&self) -> StorageResult { - todo!() - } -} - -struct FakeBroadcast { - pub peer_reports: mpsc::Sender<(FuelPeerId, AppScore, String)>, - pub confirmation_gossip_broadcast: mpsc::Sender, -} - -impl Broadcast for FakeBroadcast { - fn report_peer( - &self, - peer_id: FuelPeerId, - report: AppScore, - reporting_service: &'static str, - ) -> anyhow::Result<()> { - self.peer_reports - .try_send((peer_id, report, reporting_service.to_string()))?; - Ok(()) - } - - fn block_height_broadcast( - &self, - _block_height_data: BlockHeightHeartbeatData, - ) -> anyhow::Result<()> { - todo!() - } - - fn tx_broadcast(&self, _transaction: TransactionGossipData) -> anyhow::Result<()> { - todo!() - } 
- - fn confirmations_broadcast( - &self, - confirmations: ConfirmationsGossipData, - ) -> anyhow::Result<()> { - self.confirmation_gossip_broadcast.try_send(confirmations)?; - Ok(()) - } - - fn new_tx_subscription_broadcast(&self, _peer_id: FuelPeerId) -> anyhow::Result<()> { - todo!() - } -} - -#[tokio::test] -async fn peer_heartbeat_reputation_checks__slow_heartbeat_sends_reports() { - // given - let peer_id = PeerId::random(); - // more than limit - let last_duration = Duration::from_secs(30); - let mut durations = VecDeque::new(); - durations.push_front(last_duration); - - let heartbeat_data = HeartbeatData { - block_height: None, - last_heartbeat: Instant::now(), - last_heartbeat_sys: SystemTime::now(), - window: 0, - durations, - }; - let peer_info = PeerInfo { - peer_addresses: Default::default(), - client_version: None, - heartbeat_data, - score: 100.0, - }; - let peer_info = vec![(peer_id, peer_info)]; - let p2p_service = FakeP2PService { - peer_info, - next_event_stream: Box::pin(futures::stream::pending()), - }; - let (request_sender, request_receiver) = mpsc::channel(100); - - let (report_sender, mut report_receiver) = mpsc::channel(100); - let broadcast = FakeBroadcast { - peer_reports: report_sender, - confirmation_gossip_broadcast: mpsc::channel(100).0, - }; - - // Less than actual - let heartbeat_max_avg_interval = Duration::from_secs(20); - // Greater than actual - let heartbeat_max_time_since_last = Duration::from_secs(40); - - // Arbitrary values - let heartbeat_peer_reputation_config = HeartbeatPeerReputationConfig { - old_heartbeat_penalty: 5.6, - low_heartbeat_frequency_penalty: 20.45, - }; - - let mut task = Task { - chain_id: Default::default(), - response_timeout: Default::default(), - p2p_service, - view_provider: FakeDB, - next_block_height: FakeBlockImporter.next_block_height(), - tx_pool: FakeTxPool, - request_receiver, - request_sender, - db_heavy_task_processor: SyncProcessor::new("Test", 1, 1).unwrap(), - 
tx_pool_heavy_task_processor: AsyncProcessor::new("Test", 1, 1).unwrap(), - broadcast, - max_headers_per_request: 0, - max_txs_per_request: 100, - heartbeat_check_interval: Duration::from_secs(0), - heartbeat_max_avg_interval, - heartbeat_max_time_since_last, - next_check_time: Instant::now(), - heartbeat_peer_reputation_config: heartbeat_peer_reputation_config.clone(), - cached_view: Arc::new(CachedView::new(100, false)), - }; - let (watch_sender, watch_receiver) = tokio::sync::watch::channel(State::Started); - let mut watcher = StateWatcher::from(watch_receiver); - - // when - let (report_peer_id, report, reporting_service) = tokio::time::timeout( - Duration::from_secs(1), - wait_until_report_received(&mut report_receiver, &mut task, &mut watcher), - ) - .await - .unwrap(); - - // then - watch_sender.send(State::Stopped).unwrap(); - - assert_eq!( - FuelPeerId::from(peer_id.to_bytes().to_vec()), - report_peer_id - ); - assert_eq!( - report, - heartbeat_peer_reputation_config.low_heartbeat_frequency_penalty - ); - assert_eq!(reporting_service, "p2p"); -} - -#[tokio::test] -async fn peer_heartbeat_reputation_checks__old_heartbeat_sends_reports() { - // given - let peer_id = PeerId::random(); - // under the limit - let last_duration = Duration::from_secs(5); - let last_heartbeat = Instant::now() - Duration::from_secs(50); - let last_heartbeat_sys = SystemTime::now() - Duration::from_secs(50); - let mut durations = VecDeque::new(); - durations.push_front(last_duration); - - let heartbeat_data = HeartbeatData { - block_height: None, - last_heartbeat, - last_heartbeat_sys, - window: 0, - durations, - }; - let peer_info = PeerInfo { - peer_addresses: Default::default(), - client_version: None, - heartbeat_data, - score: 100.0, - }; - let peer_info = vec![(peer_id, peer_info)]; - let p2p_service = FakeP2PService { - peer_info, - next_event_stream: Box::pin(futures::stream::pending()), - }; - let (request_sender, request_receiver) = mpsc::channel(100); - - let 
(report_sender, mut report_receiver) = mpsc::channel(100); - let broadcast = FakeBroadcast { - peer_reports: report_sender, - confirmation_gossip_broadcast: mpsc::channel(100).0, - }; - - // Greater than actual - let heartbeat_max_avg_interval = Duration::from_secs(20); - // Less than actual - let heartbeat_max_time_since_last = Duration::from_secs(40); - - // Arbitrary values - let heartbeat_peer_reputation_config = HeartbeatPeerReputationConfig { - old_heartbeat_penalty: 5.6, - low_heartbeat_frequency_penalty: 20.45, - }; - - let mut task = Task { - chain_id: Default::default(), - response_timeout: Default::default(), - p2p_service, - view_provider: FakeDB, - tx_pool: FakeTxPool, - next_block_height: FakeBlockImporter.next_block_height(), - request_receiver, - request_sender, - db_heavy_task_processor: SyncProcessor::new("Test", 1, 1).unwrap(), - tx_pool_heavy_task_processor: AsyncProcessor::new("Test", 1, 1).unwrap(), - broadcast, - max_headers_per_request: 0, - max_txs_per_request: 100, - heartbeat_check_interval: Duration::from_secs(0), - heartbeat_max_avg_interval, - heartbeat_max_time_since_last, - next_check_time: Instant::now(), - heartbeat_peer_reputation_config: heartbeat_peer_reputation_config.clone(), - cached_view: Arc::new(CachedView::new(100, false)), - }; - let (watch_sender, watch_receiver) = tokio::sync::watch::channel(State::Started); - let mut watcher = StateWatcher::from(watch_receiver); - - // when - // we run this in a loop to ensure that the task is run until it reports - let (report_peer_id, report, reporting_service) = tokio::time::timeout( - Duration::from_secs(1), - wait_until_report_received(&mut report_receiver, &mut task, &mut watcher), - ) - .await - .unwrap(); - - // then - watch_sender.send(State::Stopped).unwrap(); - - assert_eq!( - FuelPeerId::from(peer_id.to_bytes().to_vec()), - report_peer_id - ); - assert_eq!( - report, - heartbeat_peer_reputation_config.old_heartbeat_penalty - ); - assert_eq!(reporting_service, "p2p"); -} - 
-async fn wait_until_report_received( - report_receiver: &mut Receiver<(FuelPeerId, AppScore, String)>, - task: &mut Task, - watcher: &mut StateWatcher, -) -> (FuelPeerId, AppScore, String) { - loop { - task.run(watcher).await; - if let Ok((peer_id, recv_report, service)) = report_receiver.try_recv() { - return (peer_id, recv_report, service); - } - } -} - -#[tokio::test] -async fn should_process_all_imported_block_under_infinite_events_from_p2p() { - // Given - let (blocks_processed_sender, mut block_processed_receiver) = mpsc::channel(1); - let next_block_height = Box::pin(futures::stream::repeat_with(move || { - blocks_processed_sender.try_send(()).unwrap(); - BlockHeight::from(0) - })); - let infinite_event_stream = Box::pin(futures::stream::empty()); - let p2p_service = FakeP2PService { - peer_info: vec![], - next_event_stream: infinite_event_stream, - }; - - // Initialization - let (request_sender, request_receiver) = mpsc::channel(100); - let broadcast = FakeBroadcast { - peer_reports: mpsc::channel(100).0, - confirmation_gossip_broadcast: mpsc::channel(100).0, - }; - let mut task = Task { - chain_id: Default::default(), - response_timeout: Default::default(), - p2p_service, - tx_pool: FakeTxPool, - view_provider: FakeDB, - next_block_height, - request_receiver, - request_sender, - db_heavy_task_processor: SyncProcessor::new("Test", 1, 1).unwrap(), - tx_pool_heavy_task_processor: AsyncProcessor::new("Test", 1, 1).unwrap(), - broadcast, - max_headers_per_request: 0, - max_txs_per_request: 100, - heartbeat_check_interval: Duration::from_secs(0), - heartbeat_max_avg_interval: Default::default(), - heartbeat_max_time_since_last: Default::default(), - next_check_time: Instant::now(), - heartbeat_peer_reputation_config: Default::default(), - cached_view: Arc::new(CachedView::new(100, false)), - }; - let mut watcher = StateWatcher::started(); - // End of initialization - - for _ in 0..100 { - // When - task.run(&mut watcher).await; - - // Then - 
block_processed_receiver - .try_recv() - .expect("Should process the block height even under p2p pressure"); - } -} - -fn arb_tx_confirmation_gossip_message() -> FuelP2PEvent { - let peer_id = PeerId::random(); - let message_id = vec![1, 2, 3, 4, 5].into(); - let topic_hash = TopicHash::from_raw(TX_PRECONFIRMATIONS_GOSSIP_TOPIC); - let confirmations = PreConfirmationMessage::default_test_confirmation(); - let message = GossipsubMessage::TxPreConfirmations(confirmations); - FuelP2PEvent::GossipsubMessage { - peer_id, - message_id, - topic_hash, - message, - } -} - -#[tokio::test] -async fn run__gossip_message_from_p2p_service_is_broadcasted__tx_confirmations() { - // given - let gossip_message_event = arb_tx_confirmation_gossip_message(); - let events = vec![gossip_message_event.clone()]; - let event_stream = futures::stream::iter(events); - let p2p_service = FakeP2PService { - peer_info: vec![], - next_event_stream: Box::pin(event_stream), - }; - let (confirmations_sender, mut confirmations_receiver) = mpsc::channel(100); - let broadcast = FakeBroadcast { - peer_reports: mpsc::channel(100).0, - confirmation_gossip_broadcast: confirmations_sender, - }; - let (request_sender, request_receiver) = mpsc::channel(100); - let mut task = Task { - chain_id: Default::default(), - response_timeout: Default::default(), - p2p_service, - view_provider: FakeDB, - next_block_height: FakeBlockImporter.next_block_height(), - tx_pool: FakeTxPool, - request_receiver, - request_sender, - db_heavy_task_processor: SyncProcessor::new("Test", 1, 1).unwrap(), - tx_pool_heavy_task_processor: AsyncProcessor::new("Test", 1, 1).unwrap(), - broadcast, - max_headers_per_request: 0, - max_txs_per_request: 100, - heartbeat_check_interval: Duration::from_secs(0), - heartbeat_max_avg_interval: Default::default(), - heartbeat_max_time_since_last: Default::default(), - next_check_time: Instant::now(), - heartbeat_peer_reputation_config: Default::default(), - cached_view: Arc::new(CachedView::new(100, 
false)), - }; - - // when - let mut watcher = StateWatcher::started(); - task.run(&mut watcher).await; - tokio::time::sleep(Duration::from_millis(100)).await; - - // then - let actual = confirmations_receiver.try_recv().unwrap().data.unwrap(); - let FuelP2PEvent::GossipsubMessage { message, .. } = gossip_message_event else { - panic!("Expected GossipsubMessage event"); - }; - let GossipsubMessage::TxPreConfirmations(expected) = message else { - panic!("Expected Confirmations message"); - }; - assert_eq!(expected, actual); -} From 226e8f2b6ac70f6c15960f0b0f12b72c4c2f8fcc Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Sun, 23 Feb 2025 12:23:46 -0700 Subject: [PATCH 18/20] Appease Clippy-sama --- crates/services/p2p/src/service.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/services/p2p/src/service.rs b/crates/services/p2p/src/service.rs index ad9c806fc0c..180c38a7c9c 100644 --- a/crates/services/p2p/src/service.rs +++ b/crates/services/p2p/src/service.rs @@ -1901,7 +1901,7 @@ pub mod task_tests { watcher: &mut StateWatcher, ) -> (FuelPeerId, AppScore, String) { loop { - task.run(watcher).await; + let _ = task.run(watcher).await; if let Ok((peer_id, recv_report, service)) = report_receiver.try_recv() { return (peer_id, recv_report, service); } @@ -1954,7 +1954,7 @@ pub mod task_tests { for _ in 0..100 { // When - task.run(&mut watcher).await; + let _ = task.run(&mut watcher).await; // Then block_processed_receiver @@ -2017,7 +2017,7 @@ pub mod task_tests { // when let mut watcher = StateWatcher::started(); - task.run(&mut watcher).await; + let _ = task.run(&mut watcher).await; tokio::time::sleep(Duration::from_millis(100)).await; // then From 7f82371c9e334e0425a255ad71774784ab8689c0 Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Mon, 24 Feb 2025 09:32:49 -0700 Subject: [PATCH 19/20] Rename trait method --- crates/services/p2p/src/service.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git 
a/crates/services/p2p/src/service.rs b/crates/services/p2p/src/service.rs index 180c38a7c9c..7668c5a25bc 100644 --- a/crates/services/p2p/src/service.rs +++ b/crates/services/p2p/src/service.rs @@ -373,7 +373,7 @@ pub trait Broadcast: Send { fn tx_broadcast(&self, transaction: TransactionGossipData) -> anyhow::Result<()>; - fn confirmations_broadcast( + fn pre_confirmation_broadcast( &self, confirmations: ConfirmationsGossipData, ) -> anyhow::Result<()>; @@ -404,7 +404,7 @@ impl Broadcast for SharedState { Ok(()) } - fn confirmations_broadcast( + fn pre_confirmation_broadcast( &self, confirmations: ConfirmationsGossipData, ) -> anyhow::Result<()> { @@ -474,7 +474,7 @@ impl Task { } GossipsubMessage::TxPreConfirmations(confirmations) => { let data = GossipData::new(confirmations, peer_id, message_id); - let _ = self.broadcast.confirmations_broadcast(data); + let _ = self.broadcast.pre_confirmation_broadcast(data); } } } @@ -1692,7 +1692,7 @@ pub mod task_tests { todo!() } - fn confirmations_broadcast( + fn pre_confirmation_broadcast( &self, confirmations: ConfirmationsGossipData, ) -> anyhow::Result<()> { @@ -2060,7 +2060,7 @@ pub mod broadcast_tests { // when broadcast - .confirmations_broadcast(confirmations_gossip_data) + .pre_confirmation_broadcast(confirmations_gossip_data) .unwrap(); // then From e14289720fb082cf2e9d059f8d6ca947f8d4803e Mon Sep 17 00:00:00 2001 From: Mitch Turner Date: Tue, 25 Feb 2025 16:24:17 -0700 Subject: [PATCH 20/20] Rename topic --- crates/services/p2p/src/gossipsub/topics.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/services/p2p/src/gossipsub/topics.rs b/crates/services/p2p/src/gossipsub/topics.rs index 8e2d585bd38..276965793d7 100644 --- a/crates/services/p2p/src/gossipsub/topics.rs +++ b/crates/services/p2p/src/gossipsub/topics.rs @@ -10,7 +10,7 @@ use super::messages::{ }; pub const NEW_TX_GOSSIP_TOPIC: &str = "new_tx"; -pub const TX_PRECONFIRMATIONS_GOSSIP_TOPIC: &str = "tx_confirmations"; +pub const 
TX_PRECONFIRMATIONS_GOSSIP_TOPIC: &str = "tx_preconfirmations"; /// Holds used Gossipsub Topics /// Each field contains TopicHash of existing topics