From 05bd395aa2bd8a001bcafda24115b732709656a5 Mon Sep 17 00:00:00 2001 From: Ian Clarke Date: Thu, 27 Nov 2025 16:08:09 -0600 Subject: [PATCH 01/32] refactor: add PeerAddr enum for explicit address state MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Phase 1.1 of peer identity restructuring (issue #2164). Adds PeerAddr enum with Unknown and Known(SocketAddr) variants to explicitly represent when a peer's address is unknown (e.g., sender doesn't know their own external address behind NAT) vs when it's known. This enum will be used in Connect messages to allow the first recipient to fill in the sender's address from the packet source. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --- crates/core/src/ring/mod.rs | 3 +- crates/core/src/ring/peer_key_location.rs | 53 +++++++++++++++++++++++ 2 files changed, 55 insertions(+), 1 deletion(-) diff --git a/crates/core/src/ring/mod.rs b/crates/core/src/ring/mod.rs index 38c0acc75..6b2bf4a7e 100644 --- a/crates/core/src/ring/mod.rs +++ b/crates/core/src/ring/mod.rs @@ -46,7 +46,8 @@ pub(crate) use self::transient_manager::TransientConnectionManager; pub use self::live_tx::LiveTransactionTracker; pub use connection::Connection; pub use location::{Distance, Location}; -pub use peer_key_location::PeerKeyLocation; +#[allow(unused_imports)] // PeerAddr will be used as refactoring progresses +pub use peer_key_location::{PeerAddr, PeerKeyLocation}; /// Thread safe and friendly data structure to keep track of the local knowledge /// of the state of the ring. diff --git a/crates/core/src/ring/peer_key_location.rs b/crates/core/src/ring/peer_key_location.rs index 53aa67996..c33116f65 100644 --- a/crates/core/src/ring/peer_key_location.rs +++ b/crates/core/src/ring/peer_key_location.rs @@ -1,8 +1,61 @@ use super::Location; use crate::node::PeerId; use serde::{Deserialize, Serialize}; +use std::net::SocketAddr; use std::{fmt::Display, hash::Hash}; +/// Explicit representation of peer address state. +/// +/// This enum distinguishes between: +/// - `Unknown`: The address is not known by the sender (e.g., a peer doesn't know its own external address). +/// The first recipient should fill this in from the packet source address. +/// - `Known`: The address is known and explicitly specified. +#[derive(Serialize, Deserialize, Clone, PartialEq, Eq, Hash, Debug)] +#[allow(dead_code)] // Will be used as refactoring progresses +pub enum PeerAddr { + /// Address unknown - will be filled in by first recipient from packet source. + /// Used when a peer doesn't know its own external address (e.g., behind NAT). + Unknown, + /// Known address - explicitly specified. + Known(SocketAddr), +} + +#[allow(dead_code)] // Will be used as refactoring progresses +impl PeerAddr { + /// Returns the socket address if known, None otherwise. + pub fn as_known(&self) -> Option<&SocketAddr> { + match self { + PeerAddr::Known(addr) => Some(addr), + PeerAddr::Unknown => None, + } + } + + /// Returns true if the address is known. + pub fn is_known(&self) -> bool { + matches!(self, PeerAddr::Known(_)) + } + + /// Returns true if the address is unknown. 
+ pub fn is_unknown(&self) -> bool { + matches!(self, PeerAddr::Unknown) + } +} + +impl From for PeerAddr { + fn from(addr: SocketAddr) -> Self { + PeerAddr::Known(addr) + } +} + +impl std::fmt::Display for PeerAddr { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + PeerAddr::Unknown => write!(f, ""), + PeerAddr::Known(addr) => write!(f, "{}", addr), + } + } +} + #[derive(Serialize, Deserialize, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] #[cfg_attr(test, derive(arbitrary::Arbitrary))] /// The location of a peer in the ring. This location allows routing towards the peer. From 18ccae1dd05cecb7b15988d18416959b18ac2466 Mon Sep 17 00:00:00 2001 From: Ian Clarke Date: Thu, 27 Nov 2025 16:11:45 -0600 Subject: [PATCH 02/32] refactor: add accessor methods to PeerKeyLocation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Phase 1.2 of peer identity restructuring (issue #2164). Adds pub_key() and addr() methods to PeerKeyLocation to provide a cleaner interface that doesn't expose the internal PeerId structure. Also adds new() and with_location() constructors. These methods will allow gradual migration from .peer.pub_key and .peer.addr to the new accessors before removing the peer field. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --- crates/core/src/ring/peer_key_location.rs | 33 +++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/crates/core/src/ring/peer_key_location.rs b/crates/core/src/ring/peer_key_location.rs index c33116f65..75db9bbe9 100644 --- a/crates/core/src/ring/peer_key_location.rs +++ b/crates/core/src/ring/peer_key_location.rs @@ -1,5 +1,6 @@ use super::Location; use crate::node::PeerId; +use crate::transport::TransportPublicKey; use serde::{Deserialize, Serialize}; use std::net::SocketAddr; use std::{fmt::Display, hash::Hash}; @@ -67,6 +68,38 @@ pub struct PeerKeyLocation { } impl PeerKeyLocation { + /// Creates a new PeerKeyLocation with the given public key and address. + pub fn new(pub_key: TransportPublicKey, addr: SocketAddr) -> Self { + PeerKeyLocation { + peer: PeerId::new(addr, pub_key), + location: None, + } + } + + /// Creates a new PeerKeyLocation with the given public key and address, plus explicit location. + pub fn with_location( + pub_key: TransportPublicKey, + addr: SocketAddr, + location: Location, + ) -> Self { + PeerKeyLocation { + peer: PeerId::new(addr, pub_key), + location: Some(location), + } + } + + /// Returns the peer's public key (identity). + #[inline] + pub fn pub_key(&self) -> &TransportPublicKey { + &self.peer.pub_key + } + + /// Returns the peer's socket address. + #[inline] + pub fn addr(&self) -> SocketAddr { + self.peer.addr + } + #[cfg(test)] pub fn random() -> Self { PeerKeyLocation { From 74124312f4f681877d63e8b12adea5f78de4dc9f Mon Sep 17 00:00:00 2001 From: Ian Clarke Date: Thu, 27 Nov 2025 16:17:59 -0600 Subject: [PATCH 03/32] refactor: migrate PeerKeyLocation field accesses to use new methods MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Phase 1.3 of peer identity restructuring (issue #2164). Uses rust-analyzer SSR to convert: - .peer.pub_key -> .pub_key() - .peer.addr -> .addr() (for read accesses) Assignment operations (.peer.addr = x) are kept as direct field access for now since the addr() method returns a copy, not a reference. 
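To make the read/write asymmetry concrete, here is a minimal, self-contained sketch (using simplified stand-in types, not the actual crate definitions) of why read sites can migrate to `addr()` while assignment sites must keep direct field access:

```rust
use std::net::SocketAddr;

// Simplified stand-ins for illustration only; the real `PeerKeyLocation` and
// `PeerId` live in crates/core/src/ring/ and crates/core/src/node/.
#[derive(Clone)]
struct PeerId {
    addr: SocketAddr,
}

#[derive(Clone)]
struct PeerKeyLocation {
    peer: PeerId,
}

impl PeerKeyLocation {
    /// Read accessor: returns a copy of the address, so it can replace
    /// `.peer.addr` at every site that only reads the value.
    fn addr(&self) -> SocketAddr {
        self.peer.addr
    }
}

fn main() {
    let mut pkl = PeerKeyLocation {
        peer: PeerId {
            addr: "127.0.0.1:8080".parse().unwrap(),
        },
    };

    // Read access migrates cleanly to the accessor.
    println!("read via accessor: {}", pkl.addr());

    // A write cannot go through `addr()`: the call yields a temporary copy,
    // not a place expression, so `pkl.addr() = ...` would not compile.
    // Assignment sites therefore keep the direct field access for now.
    pkl.peer.addr = "127.0.0.1:9090".parse().unwrap();
    println!("after direct-field write: {}", pkl.addr());
}
```

Because `addr()` returns a `SocketAddr` by value rather than a place expression, write sites can only move off the raw field once a setter or the later `PeerAddr`-based restructuring lands.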
🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --- .../src/node/network_bridge/p2p_protoc.rs | 12 ++++---- crates/core/src/operations/connect.rs | 29 ++++++++++++------- crates/core/src/tracing/mod.rs | 2 +- .../core/src/transport/connection_handler.rs | 2 +- 4 files changed, 27 insertions(+), 18 deletions(-) diff --git a/crates/core/src/node/network_bridge/p2p_protoc.rs b/crates/core/src/node/network_bridge/p2p_protoc.rs index af35d4b89..abb41a046 100644 --- a/crates/core/src/node/network_bridge/p2p_protoc.rs +++ b/crates/core/src/node/network_bridge/p2p_protoc.rs @@ -384,8 +384,8 @@ impl P2pConnManager { .connections .get(&target_peer.peer) .or_else(|| { - if target_peer.peer.addr.ip().is_unspecified() { - ctx.connection_entry_by_pub_key(&target_peer.peer.pub_key) + if target_peer.addr().ip().is_unspecified() { + ctx.connection_entry_by_pub_key(target_peer.pub_key()) .map(|(existing_peer, sender)| { tracing::info!( tx = %msg.id(), @@ -841,7 +841,7 @@ impl P2pConnManager { for conn in conns { connected_peers.push(( conn.location.peer.to_string(), - conn.location.peer.addr.to_string(), + conn.location.addr().to_string(), )); } } @@ -941,7 +941,7 @@ impl P2pConnManager { response.connected_peers_detailed.push( ConnectedPeerInfo { peer_id: conn.location.peer.to_string(), - address: conn.location.peer.addr.to_string(), + address: conn.location.addr().to_string(), }, ); } @@ -1913,8 +1913,8 @@ impl P2pConnManager { if let Some(remote_addr) = inbound.remote_addr { if let Some(sender_peer) = extract_sender_from_message(&inbound.msg) { - if sender_peer.peer.addr == remote_addr - || sender_peer.peer.addr.ip().is_unspecified() + if sender_peer.addr() == remote_addr + || sender_peer.addr().ip().is_unspecified() { let mut new_peer_id = sender_peer.peer.clone(); if new_peer_id.addr.ip().is_unspecified() { diff --git a/crates/core/src/operations/connect.rs b/crates/core/src/operations/connect.rs index b99d7013c..11217ad96 100644 --- a/crates/core/src/operations/connect.rs +++ b/crates/core/src/operations/connect.rs @@ -642,7 +642,7 @@ impl ConnectOp { tx, desired_location, target_connections, - Some(own.peer.addr), + Some(own.addr()), Some(target.clone()), None, connect_forward_estimator, @@ -1295,7 +1295,7 @@ mod tests { joiner: joiner.clone(), ttl: 3, visited: vec![], - observed_addr: Some(joiner.peer.addr), + observed_addr: Some(joiner.addr()), }, forwarded_to: None, observed_sent: false, @@ -1327,7 +1327,7 @@ mod tests { joiner: joiner.clone(), ttl: 2, visited: vec![], - observed_addr: Some(joiner.peer.addr), + observed_addr: Some(joiner.addr()), }, forwarded_to: None, observed_sent: false, @@ -1356,7 +1356,7 @@ mod tests { let joiner = make_peer(5050); let observed_addr = SocketAddr::new( IpAddr::V4(Ipv4Addr::new(203, 0, 113, 10)), - joiner.peer.addr.port(), + joiner.addr().port(), ); let mut state = RelayState { upstream: joiner.clone(), @@ -1383,8 +1383,14 @@ mod tests { .observed_address .expect("expected observed address update"); assert_eq!(addr, observed_addr); - assert_eq!(target.peer.addr, observed_addr); - assert_eq!(state.request.joiner.peer.addr, observed_addr); + assert_eq!(target.addr(), observed_addr); + // After handling, the joiner should be upgraded to Known with the observed address + let joiner_peer = state + .request + .joiner + .peer_id() + .expect("joiner should be Known after observed_addr"); + assert_eq!(joiner_peer.addr, observed_addr); } #[test] @@ -1454,7 +1460,7 @@ mod tests { joiner: joiner.clone(), ttl: 3, visited: 
vec![joiner.clone()], - observed_addr: Some(joiner.peer.addr), + observed_addr: Some(joiner.addr()), }; let tx = Transaction::new::(); @@ -1559,14 +1565,17 @@ mod tests { .response_target .expect("response_target should be set when accepting"); assert_eq!( - response_target.peer.addr, observed_public_addr, + response_target.addr(), + observed_public_addr, "response_target must use observed external address ({}) not private address ({})", - observed_public_addr, private_addr + observed_public_addr, + private_addr ); // Double-check: the original joiner had the private address assert_eq!( - joiner.peer.addr, private_addr, + joiner.addr(), + private_addr, "original joiner should have private address" ); } diff --git a/crates/core/src/tracing/mod.rs b/crates/core/src/tracing/mod.rs index eda119292..de12b5ee0 100644 --- a/crates/core/src/tracing/mod.rs +++ b/crates/core/src/tracing/mod.rs @@ -1552,7 +1552,7 @@ pub(super) mod test { .flat_map(|dcs| dcs.iter()) .any(|dc| dc > &l.datetime); if let Some((this_loc, conn_loc)) = this.location.zip(connected.location) { - if &this.peer.pub_key == key && !disconnected { + if this.pub_key() == key && !disconnected { return Some((connected.peer.clone(), conn_loc.distance(this_loc))); } } diff --git a/crates/core/src/transport/connection_handler.rs b/crates/core/src/transport/connection_handler.rs index 7b26e75fd..7a4476bb1 100644 --- a/crates/core/src/transport/connection_handler.rs +++ b/crates/core/src/transport/connection_handler.rs @@ -88,7 +88,7 @@ pub(crate) async fn create_connection_handler( Some(Arc::new( known_gateways .iter() - .map(|g| g.peer.addr) + .map(|g| g.addr()) .collect::>(), )) }; From 075e75d966c4f0f3918244ed6a2388f1d8803f7b Mon Sep 17 00:00:00 2001 From: Ian Clarke Date: Thu, 27 Nov 2025 17:16:27 -0600 Subject: [PATCH 04/32] refactor(ring): restructure PeerKeyLocation to separate identity from address MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Key changes: - Replace `peer: PeerId` with `pub_key: TransportPublicKey` + `peer_addr: PeerAddr` - Add PeerAddr enum with Unknown/Known variants for explicit address state - Add accessor methods: pub_key(), addr(), socket_addr(), peer() - Add constructors: new(), with_unknown_addr(), with_location() - Implement Ord/PartialOrd based on socket address This separates cryptographic identity (pub_key) from network address (peer_addr), enabling proper handling of peers behind NAT who don't know their external address. Part of #2164 peer identity restructuring. 
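The intent of the new split can be illustrated with a small, self-contained sketch. This is a simplified model only: the stand-in `String` public key, the `observe_addr` helper, and the literal addresses are illustrative assumptions rather than code from the patch (the real types carry `TransportPublicKey`, `Location`, and serde derives in `peer_key_location.rs`):

```rust
use std::net::SocketAddr;

// Simplified sketch of the restructured type described above. Field and
// variant names follow the commit message, but the `String` public key and
// the `observe_addr` helper are illustrative stand-ins, not patch code.
#[derive(Clone, Debug, PartialEq, Eq)]
enum PeerAddr {
    /// The peer does not know its own external address (e.g. behind NAT);
    /// the first recipient fills it in from the packet source.
    Unknown,
    /// Explicitly known socket address.
    Known(SocketAddr),
}

#[derive(Clone, Debug)]
struct PeerKeyLocation {
    pub_key: String, // stand-in for TransportPublicKey
    peer_addr: PeerAddr,
}

impl PeerKeyLocation {
    fn with_unknown_addr(pub_key: String) -> Self {
        Self {
            pub_key,
            peer_addr: PeerAddr::Unknown,
        }
    }

    /// Upgrade an identity-only entry to a routable one using the address
    /// observed on the wire, leaving an already-known address untouched.
    fn observe_addr(&mut self, packet_source: SocketAddr) {
        if self.peer_addr == PeerAddr::Unknown {
            self.peer_addr = PeerAddr::Known(packet_source);
        }
    }
}

fn main() {
    // A joiner behind NAT announces itself by public key only...
    let mut joiner = PeerKeyLocation::with_unknown_addr("joiner-pub-key".into());
    assert_eq!(joiner.peer_addr, PeerAddr::Unknown);

    // ...and the first relay/gateway fills in the observed source address.
    joiner.observe_addr("203.0.113.10:9000".parse().unwrap());
    assert_eq!(
        joiner.peer_addr,
        PeerAddr::Known("203.0.113.10:9000".parse().unwrap())
    );
    println!("{} is now routable at {:?}", joiner.pub_key, joiner.peer_addr);
}
```

The key property is that cryptographic identity (`pub_key`) is always present, while the network address can start out `Unknown` and be filled in once from the observed packet source, which is what the Connect handling in the later patches relies on.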
🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --- crates/core/src/client_events/mod.rs | 2 +- crates/core/src/node/mod.rs | 11 +- .../src/node/network_bridge/p2p_protoc.rs | 35 +++-- crates/core/src/node/op_state_manager.rs | 2 +- crates/core/src/node/p2p_impl.rs | 2 +- crates/core/src/operations/connect.rs | 148 ++++++++++-------- crates/core/src/operations/get.rs | 70 ++++----- crates/core/src/operations/mod.rs | 8 +- crates/core/src/operations/put.rs | 88 ++++++----- crates/core/src/operations/subscribe.rs | 74 ++++----- crates/core/src/operations/subscribe/tests.rs | 127 +++++++-------- crates/core/src/operations/update.rs | 76 ++++----- crates/core/src/ring/connection_manager.rs | 34 ++-- crates/core/src/ring/mod.rs | 21 +-- crates/core/src/ring/peer_key_location.rs | 147 +++++++++++++++-- crates/core/src/ring/seeding.rs | 53 ++++--- crates/core/src/router/isotonic_estimator.rs | 4 +- crates/core/src/topology/mod.rs | 2 +- crates/core/src/tracing/mod.rs | 60 +++---- 19 files changed, 547 insertions(+), 417 deletions(-) diff --git a/crates/core/src/client_events/mod.rs b/crates/core/src/client_events/mod.rs index 894bd1614..5fcf6d57f 100644 --- a/crates/core/src/client_events/mod.rs +++ b/crates/core/src/client_events/mod.rs @@ -422,7 +422,7 @@ async fn process_open_request( let own_location = op_manager.ring.connection_manager.own_location(); let has_remote_peers = op_manager .ring - .closest_potentially_caching(&contract_key, &[own_location.peer][..]) + .closest_potentially_caching(&contract_key, &[own_location.peer()][..]) .is_some(); if !has_remote_peers { diff --git a/crates/core/src/node/mod.rs b/crates/core/src/node/mod.rs index 9fd18e66a..187f3e224 100644 --- a/crates/core/src/node/mod.rs +++ b/crates/core/src/node/mod.rs @@ -369,9 +369,12 @@ impl NodeConfig { let gateways: Vec = self .gateways .iter() - .map(|node| PeerKeyLocation { - peer: node.peer_id.clone(), - location: Some(node.location), + .map(|node| { + PeerKeyLocation::with_location( + node.peer_id.pub_key.clone(), + node.peer_id.addr, + node.location, + ) }) .collect(); @@ -1174,7 +1177,7 @@ async fn handle_aborted_op( { let gateway = op.gateway().cloned(); if let Some(gateway) = gateway { - tracing::warn!("Retry connecting to gateway {}", gateway.peer); + tracing::warn!("Retry connecting to gateway {}", gateway.peer()); connect::join_ring_request(None, &gateway, op_manager).await?; } } diff --git a/crates/core/src/node/network_bridge/p2p_protoc.rs b/crates/core/src/node/network_bridge/p2p_protoc.rs index abb41a046..b1d3f2825 100644 --- a/crates/core/src/node/network_bridge/p2p_protoc.rs +++ b/crates/core/src/node/network_bridge/p2p_protoc.rs @@ -357,7 +357,7 @@ impl P2pConnManager { .connection_manager .get_peer_key() .unwrap(); - if target_peer.peer == self_peer_id { + if target_peer.peer() == self_peer_id { tracing::error!( tx = %msg.id(), msg_type = %msg, @@ -382,14 +382,14 @@ impl P2pConnManager { // removed by another task between those two calls. 
let peer_connection = ctx .connections - .get(&target_peer.peer) + .get(&target_peer.peer()) .or_else(|| { if target_peer.addr().ip().is_unspecified() { ctx.connection_entry_by_pub_key(target_peer.pub_key()) .map(|(existing_peer, sender)| { tracing::info!( tx = %msg.id(), - target_peer = %target_peer.peer, + target_peer = %target_peer.peer(), resolved_addr = %existing_peer.addr, "Resolved outbound connection using peer public key due to unspecified address" ); @@ -402,7 +402,7 @@ impl P2pConnManager { tracing::debug!( tx = %msg.id(), self_peer = %ctx.bridge.op_manager.ring.connection_manager.pub_key, - target = %target_peer.peer, + target = %target_peer.peer(), conn_map_size = ctx.connections.len(), has_connection = peer_connection.is_some(), "[CONN_TRACK] LOOKUP: Checking for existing connection in HashMap" @@ -425,14 +425,14 @@ impl P2pConnManager { None => { tracing::warn!( id = %msg.id(), - target = %target_peer.peer, + target = %target_peer.peer(), "No existing outbound connection, establishing connection first" ); // Queue the message for sending after connection is established let tx = *msg.id(); let (callback, mut result) = tokio::sync::mpsc::channel(10); - let target_peer_id = target_peer.peer.clone(); + let target_peer_id = target_peer.peer().clone(); let msg_clone = msg.clone(); let bridge_sender = ctx.bridge.ev_listener_tx.clone(); let self_peer_id = ctx @@ -448,7 +448,7 @@ impl P2pConnManager { ctx.bridge .ev_listener_tx .send(Right(NodeEvent::ConnectPeer { - peer: target_peer.peer.clone(), + peer: target_peer.peer().clone(), tx, callback, is_gw: false, @@ -840,7 +840,7 @@ impl P2pConnManager { for conns in connections_by_loc.values() { for conn in conns { connected_peers.push(( - conn.location.peer.to_string(), + conn.location.peer().to_string(), conn.location.addr().to_string(), )); } @@ -911,7 +911,7 @@ impl P2pConnManager { .map(|s| { s.value() .iter() - .map(|pk| pk.peer.to_string()) + .map(|pk| pk.peer().to_string()) .collect() }) .unwrap_or_default() @@ -937,10 +937,11 @@ impl P2pConnManager { use freenet_stdlib::client_api::ConnectedPeerInfo; for conns in connections_by_loc.values() { for conn in conns { - connected_peer_ids.push(conn.location.peer.to_string()); + connected_peer_ids + .push(conn.location.peer().to_string()); response.connected_peers_detailed.push( ConnectedPeerInfo { - peer_id: conn.location.peer.to_string(), + peer_id: conn.location.peer().to_string(), address: conn.location.addr().to_string(), }, ); @@ -949,7 +950,7 @@ impl P2pConnManager { } else { for conns in connections_by_loc.values() { connected_peer_ids.extend( - conns.iter().map(|c| c.location.peer.to_string()), + conns.iter().map(|c| c.location.peer().to_string()), ); } } @@ -1916,14 +1917,14 @@ impl P2pConnManager { if sender_peer.addr() == remote_addr || sender_peer.addr().ip().is_unspecified() { - let mut new_peer_id = sender_peer.peer.clone(); + let mut new_peer_id = sender_peer.peer().clone(); if new_peer_id.addr.ip().is_unspecified() { new_peer_id.addr = remote_addr; if let Some(sender_mut) = extract_sender_from_message_mut(&mut inbound.msg) { - if sender_mut.peer.addr.ip().is_unspecified() { - sender_mut.peer.addr = remote_addr; + if sender_mut.peer().addr.ip().is_unspecified() { + sender_mut.peer().addr = remote_addr; } } } @@ -2023,11 +2024,11 @@ impl P2pConnManager { msg_type = %msg, target_peer = %target, self_peer = %self_peer, - target_equals_self = (target.peer == self_peer), + target_equals_self = (target.peer() == self_peer), "[ROUTING] handle_notification_msg: Checking if 
message targets self" ); - if target.peer != self_peer { + if target.peer() != self_peer { // Message targets another peer - send as outbound tracing::info!( tx = %msg.id(), diff --git a/crates/core/src/node/op_state_manager.rs b/crates/core/src/node/op_state_manager.rs index b3464d370..fc92682c4 100644 --- a/crates/core/src/node/op_state_manager.rs +++ b/crates/core/src/node/op_state_manager.rs @@ -385,7 +385,7 @@ impl OpManager { .all_network_subscriptions() .into_iter() .map(|(contract_key, subscribers)| { - let peer_ids: Vec = subscribers.into_iter().map(|sub| sub.peer).collect(); + let peer_ids: Vec = subscribers.into_iter().map(|sub| sub.peer()).collect(); (contract_key, peer_ids) }) .collect() diff --git a/crates/core/src/node/p2p_impl.rs b/crates/core/src/node/p2p_impl.rs index b3a0802ee..3fcaa39c5 100644 --- a/crates/core/src/node/p2p_impl.rs +++ b/crates/core/src/node/p2p_impl.rs @@ -170,7 +170,7 @@ impl NodeP2P { tracing::debug!( %tx, - query_peer = %query_target.peer, + query_peer = %query_target.peer(), %ideal_location, "Triggering connection maintenance connect request" ); diff --git a/crates/core/src/operations/connect.rs b/crates/core/src/operations/connect.rs index 11217ad96..2a447dfd3 100644 --- a/crates/core/src/operations/connect.rs +++ b/crates/core/src/operations/connect.rs @@ -113,9 +113,9 @@ impl fmt::Display for ConnectMsg { } impl ConnectMsg { - pub fn sender(&self) -> Option<&PeerId> { + pub fn sender(&self) -> Option { match self { - ConnectMsg::Response { sender, .. } => Some(&sender.peer), + ConnectMsg::Response { sender, .. } => Some(sender.peer()), _ => None, } } @@ -219,10 +219,11 @@ impl ConnectForwardEstimator { // learns, per-node, how often downstream peers accept/complete forwarded Connect // requests so we can bias forwarding toward peers likely to have capacity. 
let key = TransportKeypair::new(); - let dummy_peer = PeerKeyLocation { - peer: PeerId::new("127.0.0.1:0".parse().unwrap(), key.public().clone()), - location: Some(Location::new(0.0)), - }; + let dummy_peer = PeerKeyLocation::with_location( + key.public().clone(), + "127.0.0.1:0".parse().unwrap(), + Location::new(0.0), + ); let seed_events = [ IsotonicEvent { peer: dummy_peer.clone(), @@ -297,17 +298,25 @@ impl RelayState { actions.accept_response = Some(ConnectResponse { acceptor: acceptor.clone(), }); - actions.expect_connection_from = Some(self.request.joiner.clone()); - // Use the joiner with updated observed address for response routing - actions.response_target = Some(self.request.joiner.clone()); - tracing::info!( - acceptor_peer = %acceptor.peer, - joiner_peer = %self.request.joiner.peer, - acceptor_loc = ?acceptor.location, - joiner_loc = ?self.request.joiner.location, - ring_distance = ?dist, - "connect: acceptance issued" - ); + // Get PeerKeyLocation for the joiner - should always succeed after observed_addr upgrade + if let Some(joiner_pkl) = self.request.joiner.to_peer_key_location() { + actions.expect_connection_from = Some(joiner_pkl.clone()); + // Use the joiner with updated observed address for response routing + actions.response_target = Some(joiner_pkl.clone()); + tracing::info!( + acceptor_peer = %acceptor.peer(), + joiner_peer = %joiner_pkl.peer(), + acceptor_loc = ?acceptor.location, + joiner_loc = ?joiner_pkl.location, + ring_distance = ?dist, + "connect: acceptance issued" + ); + } else { + tracing::warn!( + joiner = %self.request.joiner, + "connect: cannot accept joiner without known address" + ); + } } if self.forwarded_to.is_none() && self.request.ttl > 0 { @@ -322,7 +331,7 @@ impl RelayState { tracing::info!( target = %self.request.desired_location, ttl = self.request.ttl, - next_peer = %next.peer, + next_peer = %next.peer(), next_loc = ?next.location, ring_distance_to_target = ?dist, "connect: forwarding join request to next hop" @@ -334,7 +343,7 @@ impl RelayState { self.forwarded_to = Some(next.clone()); self.request = forward_req; forward_attempts.insert( - next.peer.clone(), + next.peer().clone(), ForwardAttempt { peer: next.clone(), desired: self.request.desired_location, @@ -400,7 +409,7 @@ impl RelayContext for RelayEnv<'_> { // self wasn't added to visited by upstream callers. 
let skip = SkipListWithSelf { visited, - self_peer: &self.self_location.peer, + self_peer: &self.self_location.peer(), }; let router = self.op_manager.ring.router.read(); let candidates = self.op_manager.ring.connection_manager.routing_candidates( @@ -414,7 +423,7 @@ impl RelayContext for RelayEnv<'_> { let mut eligible: Vec = Vec::new(); for cand in candidates { - if let Some(ts) = recency.get(&cand.peer) { + if let Some(ts) = recency.get(&cand.peer()) { if now.duration_since(*ts) < RECENCY_COOLDOWN { continue; } @@ -514,7 +523,7 @@ pub(crate) struct ConnectOp { impl ConnectOp { fn record_forward_outcome(&mut self, peer: &PeerKeyLocation, desired: Location, success: bool) { - self.forward_attempts.remove(&peer.peer); + self.forward_attempts.remove(&peer.peer()); self.connect_forward_estimator .write() .record(peer, desired, success); @@ -666,13 +675,13 @@ impl ConnectOp { match self.state.as_mut() { Some(ConnectState::WaitingForResponses(state)) => { tracing::info!( - acceptor = %response.acceptor.peer, + acceptor = %response.acceptor.peer(), acceptor_loc = ?response.acceptor.location, "connect: joiner received ConnectResponse" ); let result = state.register_acceptance(response, now); if let Some(new_acceptor) = &result.new_acceptor { - self.recency.remove(&new_acceptor.peer.peer); + self.recency.remove(&new_acceptor.peer.peer()); } if result.satisfied { self.state = Some(ConnectState::Completed); @@ -747,7 +756,7 @@ impl Operation for ConnectOp { match op_manager.pop(msg.id()) { Ok(Some(OpEnum::Connect(op))) => Ok(OpInitialization { op: *op, - sender: msg.sender().cloned(), + sender: msg.sender(), }), Ok(Some(other)) => { op_manager.push(tx, other).await?; @@ -798,21 +807,21 @@ impl Operation for ConnectOp { address, }; network_bridge - .send(&target.peer, NetMessage::V1(NetMessageV1::Connect(msg))) + .send(&target.peer(), NetMessage::V1(NetMessageV1::Connect(msg))) .await?; } if let Some(peer) = actions.expect_connection_from { op_manager .notify_node_event(NodeEvent::ExpectPeerConnection { - peer: peer.peer.clone(), + peer: peer.peer().clone(), }) .await?; } if let Some((next, request)) = actions.forward { // Record recency for this forward to avoid hammering the same neighbor. 
- self.recency.insert(next.peer.clone(), Instant::now()); + self.recency.insert(next.peer().clone(), Instant::now()); let forward_msg = ConnectMsg::Request { id: self.id, from: env.self_location().clone(), @@ -821,7 +830,7 @@ impl Operation for ConnectOp { }; network_bridge .send( - &next.peer, + &next.peer(), NetMessage::V1(NetMessageV1::Connect(forward_msg)), ) .await?; @@ -868,7 +877,7 @@ impl Operation for ConnectOp { op_manager .notify_node_event( crate::message::NodeEvent::ExpectPeerConnection { - peer: new_acceptor.peer.peer.clone(), + peer: new_acceptor.peer.peer().clone(), }, ) .await?; @@ -876,7 +885,7 @@ impl Operation for ConnectOp { let (callback, mut rx) = mpsc::channel(1); op_manager .notify_node_event(NodeEvent::ConnectPeer { - peer: new_acceptor.peer.peer.clone(), + peer: new_acceptor.peer.peer().clone(), tx: self.id, callback, is_gw: false, @@ -918,8 +927,8 @@ impl Operation for ConnectOp { self.record_forward_outcome(&fwd, desired, true); } tracing::debug!( - %upstream.peer, - acceptor = %sender.peer, + upstream = %upstream.peer(), + acceptor = %sender.peer(), "connect: forwarding response towards joiner" ); let forward_msg = ConnectMsg::Response { @@ -930,7 +939,7 @@ impl Operation for ConnectOp { }; network_bridge .send( - &upstream.peer, + &upstream.peer(), NetMessage::V1(NetMessageV1::Connect(forward_msg)), ) .await?; @@ -958,18 +967,18 @@ struct SkipListWithSelf<'a> { impl Contains for SkipListWithSelf<'_> { fn has_element(&self, target: PeerId) -> bool { - &target == self.self_peer || self.visited.iter().any(|p| p.peer == target) + &target == self.self_peer || self.visited.iter().any(|p| p.peer() == target) } } impl Contains<&PeerId> for SkipListWithSelf<'_> { fn has_element(&self, target: &PeerId) -> bool { - target == self.self_peer || self.visited.iter().any(|p| &p.peer == target) + target == self.self_peer || self.visited.iter().any(|p| &p.peer() == target) } } fn push_unique_peer(list: &mut Vec, peer: PeerKeyLocation) { - let already_present = list.iter().any(|p| p.peer == peer.peer); + let already_present = list.iter().any(|p| p.peer() == peer.peer()); if !already_present { list.push(peer); } @@ -1020,7 +1029,7 @@ pub(crate) async fn join_ring_request( if !op_manager .ring .connection_manager - .should_accept(location, &gateway.peer) + .should_accept(location, &gateway.peer()) { return Err(OpError::ConnError(ConnectionError::UnwantedConnection)); } @@ -1064,7 +1073,7 @@ pub(crate) async fn join_ring_request( op.backoff = Some(backoff); } - tracing::info!(%gateway.peer, tx = %tx, "Attempting network join using connect"); + tracing::info!(gateway = %gateway.peer(), tx = %tx, "Attempting network join using connect"); op_manager .notify_op_change( @@ -1243,20 +1252,14 @@ mod tests { fn make_peer(port: u16) -> PeerKeyLocation { let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), port); let keypair = TransportKeypair::new(); - PeerKeyLocation { - peer: PeerId::new(addr, keypair.public().clone()), - location: Some(Location::random()), - } + PeerKeyLocation::with_location(keypair.public().clone(), addr, Location::random()) } #[test] fn forward_estimator_handles_missing_location() { let mut estimator = ConnectForwardEstimator::new(); let key = TransportKeypair::new(); - let peer = PeerKeyLocation { - peer: PeerId::new("127.0.0.1:1111".parse().unwrap(), key.public().clone()), - location: None, - }; + let peer = PeerKeyLocation::new(key.public().clone(), "127.0.0.1:1111".parse().unwrap()); estimator.record(&peer, Location::new(0.25), true); } @@ -1273,7 
+1276,7 @@ mod tests { ); let peer = make_peer(2000); op.forward_attempts.insert( - peer.peer.clone(), + peer.peer().clone(), ForwardAttempt { peer: peer.clone(), desired: Location::new(0.2), @@ -1310,8 +1313,11 @@ mod tests { state.handle_request(&ctx, &joiner, &recency, &mut forward_attempts, &estimator); let response = actions.accept_response.expect("expected acceptance"); - assert_eq!(response.acceptor.peer, self_loc.peer); - assert_eq!(actions.expect_connection_from.unwrap().peer, joiner.peer); + assert_eq!(response.acceptor.peer(), self_loc.peer()); + assert_eq!( + actions.expect_connection_from.unwrap().peer(), + joiner.peer() + ); assert!(actions.forward.is_none()); } @@ -1345,9 +1351,12 @@ mod tests { assert!(actions.accept_response.is_none()); let (forward_to, request) = actions.forward.expect("expected forward"); - assert_eq!(forward_to.peer, next_hop.peer); + assert_eq!(forward_to.peer(), next_hop.peer()); assert_eq!(request.ttl, 1); - assert!(request.visited.iter().any(|pkl| pkl.peer == joiner.peer)); + assert!(request + .visited + .iter() + .any(|pkl| pkl.peer() == joiner.peer())); } #[test] @@ -1409,7 +1418,7 @@ mod tests { let result = state.register_acceptance(&response, Instant::now()); assert!(result.satisfied); let new = result.new_acceptor.expect("expected new acceptor"); - assert_eq!(new.peer.peer, acceptor.peer); + assert_eq!(new.peer.peer(), acceptor.peer()); } #[test] @@ -1434,11 +1443,11 @@ mod tests { payload, .. } => { - assert_eq!(msg_target.peer, target.peer); + assert_eq!(msg_target.peer(), target.peer()); assert_eq!(payload.desired_location, desired); assert_eq!(payload.ttl, ttl); - assert!(payload.visited.iter().any(|p| p.peer == from.peer)); - assert!(payload.visited.iter().any(|p| p.peer == target.peer)); + assert!(payload.visited.iter().any(|p| p.peer() == from.peer())); + assert!(payload.visited.iter().any(|p| p.peer() == target.peer())); } other => panic!("unexpected message: {other:?}"), } @@ -1479,13 +1488,13 @@ mod tests { let (forward_target, forward_request) = actions .forward .expect("relay should forward when it declines to accept"); - assert_eq!(forward_target.peer, relay_b.peer); + assert_eq!(forward_target.peer(), relay_b.peer()); assert_eq!(forward_request.ttl, 2); assert!( forward_request .visited .iter() - .any(|p| p.peer == relay_a.peer), + .any(|p| p.peer() == relay_a.peer()), "forwarded request should record intermediate relay" ); @@ -1508,11 +1517,11 @@ mod tests { let response = accept_actions .accept_response .expect("second relay should accept when policy allows"); - assert_eq!(response.acceptor.peer, relay_b.peer); + assert_eq!(response.acceptor.peer(), relay_b.peer()); let expect_conn = accept_actions .expect_connection_from .expect("acceptance should request inbound connection from joiner"); - assert_eq!(expect_conn.peer, joiner.peer); + assert_eq!(expect_conn.peer(), joiner.peer()); } /// Regression test for issue #2141: ConnectResponse must be sent to the joiner's @@ -1522,10 +1531,11 @@ mod tests { // Joiner behind NAT with private address let private_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 100)), 9000); let keypair = TransportKeypair::new(); - let joiner = PeerKeyLocation { - peer: PeerId::new(private_addr, keypair.public().clone()), - location: Some(Location::random()), - }; + let joiner = PeerKeyLocation::with_location( + keypair.public().clone(), + private_addr, + Location::random(), + ); // Gateway observes joiner's public/external address let observed_public_addr = @@ -1594,30 +1604,30 @@ mod tests { 
let skip_list = SkipListWithSelf { visited: &visited, - self_peer: &self_peer.peer, + self_peer: &self_peer.peer(), }; // Self should be excluded even though not in visited list assert!( - skip_list.has_element(self_peer.peer.clone()), + skip_list.has_element(self_peer.peer().clone()), "SkipListWithSelf must exclude self even when not in visited list" ); // Visited peer should be excluded assert!( - skip_list.has_element(visited_peer.peer.clone()), + skip_list.has_element(visited_peer.peer().clone()), "SkipListWithSelf must exclude peers in visited list" ); // Other peer should NOT be excluded assert!( - !skip_list.has_element(other_peer.peer.clone()), + !skip_list.has_element(other_peer.peer().clone()), "SkipListWithSelf must not exclude unrelated peers" ); // Test with reference variant assert!( - skip_list.has_element(&self_peer.peer), + skip_list.has_element(&self_peer.peer()), "SkipListWithSelf must exclude &self with reference variant" ); } diff --git a/crates/core/src/operations/get.rs b/crates/core/src/operations/get.rs index c9962477b..430a1d342 100644 --- a/crates/core/src/operations/get.rs +++ b/crates/core/src/operations/get.rs @@ -185,7 +185,7 @@ pub(crate) async fn request_get( let target = candidates.remove(0); tracing::debug!( tx = %id, - target = %target.peer, + target = %target.peer(), "Preparing get contract request", ); @@ -197,7 +197,7 @@ pub(crate) async fn request_get( subscribe, }) => { let mut tried_peers = HashSet::new(); - tried_peers.insert(target.peer.clone()); + tried_peers.insert(target.peer().clone()); let new_state = Some(GetState::AwaitingResponse { key: key_val, @@ -448,7 +448,7 @@ impl Operation for GetOp { ) -> Result, OpError> { let mut sender: Option = None; if let Some(peer_key_loc) = msg.sender().cloned() { - sender = Some(peer_key_loc.peer); + sender = Some(peer_key_loc.peer()); }; let tx = *msg.id(); match op_manager.pop(msg.id()) { @@ -507,8 +507,8 @@ impl Operation for GetOp { tracing::info!( tx = %id, %key, - target = %target.peer, - sender = %sender.peer, + target = %target.peer(), + sender = %sender.peer(), fetch_contract = *fetch_contract, skip = ?skip_list, "GET: received RequestGet" @@ -533,7 +533,7 @@ impl Operation for GetOp { tracing::debug!( tx = %id, %key, - target = %target.peer, + target = %target.peer(), "GET: RequestGet processing in state {:?}", self.state ); @@ -593,7 +593,7 @@ impl Operation for GetOp { { // This is a forwarded request - send result back to requester let requester = requester.clone().unwrap(); - tracing::debug!(tx = %id, "Returning contract {} to requester {}", key, requester.peer); + tracing::debug!(tx = %id, "Returning contract {} to requester {}", key, requester.peer()); new_state = None; return_msg = Some(GetMsg::ReturnGet { id: *id, @@ -624,13 +624,13 @@ impl Operation for GetOp { tx = %id, %key, "Contract not found locally (or missing code), forwarding to {}", - target.peer + target.peer() ); // Prepare skip list with own peer ID let own_loc = op_manager.ring.connection_manager.own_location(); let mut new_skip_list = skip_list.clone(); - new_skip_list.insert(own_loc.peer.clone()); + new_skip_list.insert(own_loc.peer().clone()); // Forward using standard routing helper return try_forward_or_return( @@ -666,7 +666,7 @@ impl Operation for GetOp { tracing::warn!( tx = %id, %key, - sender = %sender.peer, + sender = %sender.peer(), "Dropping GET SeekNode with zero HTL" ); return build_op_result( @@ -695,7 +695,7 @@ impl Operation for GetOp { // Update skip list with current peer let mut new_skip_list = 
skip_list.clone(); - new_skip_list.insert(this_peer.clone().peer); + new_skip_list.insert(this_peer.clone().peer()); // Try to get contract from local storage let get_result = op_manager @@ -721,7 +721,7 @@ impl Operation for GetOp { %key, %this_peer, "Contract state available but code missing @ peer {}, retrying", - sender.peer + sender.peer() ); None } else { @@ -732,14 +732,14 @@ impl Operation for GetOp { }; if let Some((state, contract)) = local_value { - tracing::debug!(tx = %id, "Contract {key} found @ peer {}", target.peer); + tracing::debug!(tx = %id, "Contract {key} found @ peer {}", target.peer()); match self.state { Some(GetState::AwaitingResponse { requester, .. }) => { if let Some(requester) = requester { // Forward contract to requester new_state = None; - tracing::debug!(tx = %id, "Returning contract {} to {}", key, sender.peer); + tracing::debug!(tx = %id, "Returning contract {} to {}", key, sender.peer()); return_msg = Some(GetMsg::ReturnGet { id, key, @@ -764,7 +764,7 @@ impl Operation for GetOp { Some(GetState::ReceivedRequest { .. }) => { // Return contract to sender new_state = None; - tracing::debug!(tx = %id, "Returning contract {} to {}", key, sender.peer); + tracing::debug!(tx = %id, "Returning contract {} to {}", key, sender.peer()); return_msg = Some(GetMsg::ReturnGet { id, key, @@ -786,7 +786,7 @@ impl Operation for GetOp { %key, %this_peer, "Contract not found @ peer {}, retrying with other peers", - sender.peer + sender.peer() ); return try_forward_or_return( id, @@ -813,8 +813,8 @@ impl Operation for GetOp { tracing::info!( tx = %id, %key, - from = %sender.peer, - to = %target.peer, + from = %sender.peer(), + to = %target.peer(), skip = ?skip_list, "GET: ReturnGet received with empty value" ); @@ -826,7 +826,7 @@ impl Operation for GetOp { %this_peer, "Neither contract or contract value for contract found at peer {}, \ retrying with other peers", - sender.peer + sender.peer() ); match self.state { @@ -846,7 +846,7 @@ impl Operation for GetOp { // todo: register in the stats for the outcome of the op that failed to get a response from this peer // Add the failed peer to tried list - tried_peers.insert(sender.peer.clone()); + tried_peers.insert(sender.peer().clone()); // First, check if we have alternatives at this hop level if !alternatives.is_empty() && attempts_at_hop < DEFAULT_MAX_BREADTH { @@ -856,7 +856,7 @@ impl Operation for GetOp { tracing::info!( tx = %id, %key, - next_peer = %next_target.peer, + next_peer = %next_target.peer(), fetch_contract, attempts_at_hop = attempts_at_hop + 1, max_attempts = DEFAULT_MAX_BREADTH, @@ -876,7 +876,7 @@ impl Operation for GetOp { }); // Update state with the new alternative being tried - tried_peers.insert(next_target.peer.clone()); + tried_peers.insert(next_target.peer().clone()); let updated_tried_peers = tried_peers.clone(); new_state = Some(GetState::AwaitingResponse { retries, @@ -932,7 +932,7 @@ impl Operation for GetOp { // Reset for new round of attempts let mut new_tried_peers = HashSet::new(); - new_tried_peers.insert(target.peer.clone()); + new_tried_peers.insert(target.peer().clone()); new_state = Some(GetState::AwaitingResponse { retries: retries + 1, @@ -1037,7 +1037,7 @@ impl Operation for GetOp { } Some(GetState::ReceivedRequest { .. 
}) => { // Return failure to sender - tracing::debug!(tx = %id, "Returning contract {} to {}", key, sender.peer); + tracing::debug!(tx = %id, "Returning contract {} to {}", key, sender.peer()); new_state = None; return_msg = Some(GetMsg::ReturnGet { id, @@ -1096,16 +1096,16 @@ impl Operation for GetOp { tracing::warn!( tx = %id, "Contract not received from peer {} while required", - sender.peer + sender.peer() ); let mut new_skip_list = skip_list.clone(); - new_skip_list.insert(sender.peer.clone()); + new_skip_list.insert(sender.peer().clone()); tracing::warn!( tx = %id, %key, - at = %sender.peer, + at = %sender.peer(), target = %requester, "Contract not received while required, returning response to requester", ); @@ -1291,7 +1291,7 @@ impl Operation for GetOp { } Some(GetState::ReceivedRequest { .. }) => { // Return response to sender - tracing::info!(tx = %id, "Returning contract {} to {}", key, sender.peer); + tracing::info!(tx = %id, "Returning contract {} to {}", key, sender.peer()); new_state = None; return_msg = Some(GetMsg::ReturnGet { id, @@ -1352,19 +1352,19 @@ async fn try_forward_or_return( tracing::warn!( tx = %id, %key, - this_peer = %this_peer.peer, + this_peer = %this_peer.peer(), "Contract not found while processing a get request", ); let mut new_skip_list = skip_list.clone(); - new_skip_list.insert(this_peer.peer.clone()); + new_skip_list.insert(this_peer.peer().clone()); let new_htl = htl.saturating_sub(1); let (new_target, alternatives) = if new_htl == 0 { tracing::warn!( tx = %id, - sender = %sender.peer, + sender = %sender.peer(), "The maximum hops have been exceeded, sending response back to the node", ); (None, vec![]) @@ -1379,7 +1379,7 @@ async fn try_forward_or_return( tracing::warn!( tx = %id, %key, - this_peer = %this_peer.peer, + this_peer = %this_peer.peer(), "No other peers found while trying to get the contract", ); (None, vec![]) @@ -1393,10 +1393,10 @@ async fn try_forward_or_return( tracing::debug!( tx = %id, "Forwarding get request to {}", - target.peer + target.peer() ); let mut tried_peers = HashSet::new(); - tried_peers.insert(target.peer.clone()); + tried_peers.insert(target.peer().clone()); build_op_result( id, @@ -1429,7 +1429,7 @@ async fn try_forward_or_return( tracing::debug!( tx = %id, "Cannot find any other peers to forward the get request to, returning get response to {}", - sender.peer + sender.peer() ); build_op_result( diff --git a/crates/core/src/operations/mod.rs b/crates/core/src/operations/mod.rs index 34e342597..1686bae70 100644 --- a/crates/core/src/operations/mod.rs +++ b/crates/core/src/operations/mod.rs @@ -145,7 +145,7 @@ where op_manager.completed(id); if let Some(target) = msg.target() { tracing::debug!(%id, %target, "sending final message to target"); - network_bridge.send(&target.peer, msg).await?; + network_bridge.send(&target.peer(), msg).await?; } return Ok(Some(updated_state)); } else { @@ -153,7 +153,7 @@ where tracing::debug!(%id, "operation in progress"); if let Some(target) = msg.target() { tracing::debug!(%id, %target, "sending updated op state"); - network_bridge.send(&target.peer, msg).await?; + network_bridge.send(&target.peer(), msg).await?; op_manager.push(id, updated_state).await?; } else { tracing::debug!(%id, "queueing op state for local processing"); @@ -186,8 +186,8 @@ where op_manager.completed(tx_id); if let Some(target) = msg.target() { - tracing::debug!(%tx_id, target=%target.peer, "sending back message to target"); - network_bridge.send(&target.peer, msg).await?; + tracing::debug!(%tx_id, 
target=%target.peer(), "sending back message to target"); + network_bridge.send(&target.peer(), msg).await?; } } Ok(OperationResult { diff --git a/crates/core/src/operations/put.rs b/crates/core/src/operations/put.rs index 27df6ebeb..aadefc3f5 100644 --- a/crates/core/src/operations/put.rs +++ b/crates/core/src/operations/put.rs @@ -93,7 +93,7 @@ impl Operation for PutOp { ) -> Result, OpError> { let mut sender: Option = None; if let Some(peer_key_loc) = msg.sender().cloned() { - sender = Some(peer_key_loc.peer); + sender = Some(peer_key_loc.peer()); }; let tx = *msg.id(); @@ -179,8 +179,8 @@ impl Operation for PutOp { tracing::info!( "Requesting put for contract {} from {} to {}", key, - sender.peer, - target.peer + sender.peer(), + target.peer() ); let subscribe = match &self.state { @@ -210,7 +210,7 @@ impl Operation for PutOp { tracing::debug!( tx = %id, %key, - peer = %prev_sender.peer, + peer = %prev_sender.peer(), is_already_seeding, "Processing local PUT in initiating node" ); @@ -243,7 +243,7 @@ impl Operation for PutOp { tracing::debug!( tx = %id, %key, - peer = %prev_sender.peer, + peer = %prev_sender.peer(), "Marked contract as seeding locally" ); } @@ -251,7 +251,7 @@ impl Operation for PutOp { tracing::debug!( tx = %id, %key, - peer = %prev_sender.peer, + peer = %prev_sender.peer(), was_already_seeding = is_already_seeding, "Successfully processed contract locally with merge" ); @@ -261,7 +261,7 @@ impl Operation for PutOp { tracing::debug!( tx = %id, %key, - peer = %sender.peer, + peer = %sender.peer(), "Not initiator, skipping local caching" ); value.clone() @@ -269,7 +269,7 @@ impl Operation for PutOp { // Determine next forwarding target - find peers closer to the contract location // Don't reuse the target from RequestPut as that's US (the current processing peer) - let skip = [&prev_sender.peer]; + let skip = [&prev_sender.peer()]; let next_target = op_manager .ring .closest_potentially_caching(&key, skip.as_slice()); @@ -297,7 +297,7 @@ impl Operation for PutOp { // When we're the origin node we already seeded the contract locally. // Treat downstream SuccessfulPut messages as best-effort so River is unblocked. 
- if origin.peer == own_location.peer { + if origin.peer() == own_location.peer() { tracing::debug!( tx = %id, %key, @@ -374,8 +374,8 @@ impl Operation for PutOp { tracing::debug!( tx = %id, %key, - target = %target.peer, - sender = %sender.peer, + target = %target.peer(), + sender = %sender.peer(), "Putting contract at target peer", ); @@ -389,7 +389,7 @@ impl Operation for PutOp { value.clone(), *id, new_htl, - HashSet::from([sender.peer.clone()]), + HashSet::from([sender.peer().clone()]), origin.clone(), ) .await @@ -429,11 +429,11 @@ impl Operation for PutOp { // Start subscription let mut skip_list = HashSet::new(); - skip_list.insert(sender.peer.clone()); + skip_list.insert(sender.peer().clone()); // Add target to skip list if not the last hop if !last_hop { - skip_list.insert(target.peer.clone()); + skip_list.insert(target.peer().clone()); } let child_tx = @@ -447,7 +447,7 @@ impl Operation for PutOp { }; // Broadcast changes to subscribers - let broadcast_to = op_manager.get_broadcast_targets(&key, &sender.peer); + let broadcast_to = op_manager.get_broadcast_targets(&key, &sender.peer()); match try_to_broadcast( *id, last_hop, @@ -492,7 +492,7 @@ impl Operation for PutOp { tracing::debug!(tx = %id, %key, "Contract successfully updated"); // Broadcast changes to subscribers - let broadcast_to = op_manager.get_broadcast_targets(key, &sender.peer); + let broadcast_to = op_manager.get_broadcast_targets(key, &sender.peer()); tracing::debug!( tx = %id, %key, @@ -535,7 +535,7 @@ impl Operation for PutOp { let sender = op_manager.ring.connection_manager.own_location(); let mut broadcasted_to = *broadcasted_to; - if upstream.peer == sender.peer { + if upstream.peer() == sender.peer() { // Originator reached the subscription tree. This path should be filtered // out by the deduplication layer, so treat it as a warning if it happens // to help surface potential bugs. 
@@ -559,19 +559,21 @@ impl Operation for PutOp { tracing::trace!( tx = %id, %key, - upstream = %upstream.peer, + upstream = %upstream.peer(), "Forwarding SuccessfulPut upstream before broadcast" ); conn_manager - .send(&upstream.peer, NetMessage::from(ack)) + .send(&upstream.peer(), NetMessage::from(ack)) .await?; new_state = None; } // Broadcast to all peers in parallel + // Collect peer_ids first to ensure they outlive the futures + let peer_ids: Vec<_> = broadcast_to.iter().map(|p| p.peer()).collect(); let mut broadcasting = Vec::with_capacity(broadcast_to.len()); - for peer in broadcast_to.iter() { + for (peer, peer_id) in broadcast_to.iter().zip(peer_ids.iter()) { let msg = PutMsg::BroadcastTo { id: *id, key: *key, @@ -581,7 +583,7 @@ impl Operation for PutOp { contract: contract.clone(), target: peer.clone(), }; - let f = conn_manager.send(&peer.peer, msg.into()); + let f = conn_manager.send(peer_id, msg.into()); broadcasting.push(f); } @@ -605,11 +607,11 @@ impl Operation for PutOp { let peer = broadcast_to.get(peer_num).unwrap(); tracing::warn!( "failed broadcasting put change to {} with error {}; dropping connection", - peer.peer, + peer.peer(), err ); // todo: review this, maybe we should just dropping this subscription - conn_manager.drop_connection(&peer.peer).await?; + conn_manager.drop_connection(&peer.peer()).await?; incorrect_results += 1; } @@ -718,7 +720,7 @@ impl Operation for PutOp { tracing::trace!( tx = %id, %key, - upstream = %upstream_peer.peer, + upstream = %upstream_peer.peer(), "PutOp::process_message: Forwarding SuccessfulPut upstream" ); return_msg = Some(PutMsg::SuccessfulPut { @@ -766,7 +768,7 @@ impl Operation for PutOp { tracing::warn!( tx = %id, %contract, - sender = %sender.peer, + sender = %sender.peer(), "Discarding PutForward with zero HTL" ); return Ok(OperationResult { @@ -784,7 +786,7 @@ impl Operation for PutOp { tracing::debug!( tx = %id, %key, - this_peer = %peer_loc.peer, + this_peer = %peer_loc.peer(), "Forwarding changes, trying to put the contract" ); @@ -808,7 +810,7 @@ impl Operation for PutOp { let last_hop = if let Some(new_htl) = htl_value.checked_sub(1) { // Create updated skip list let mut new_skip_list = skip_list.clone(); - new_skip_list.insert(sender.peer.clone()); + new_skip_list.insert(sender.peer().clone()); // Forward to closer peers let put_here = forward_put( @@ -857,7 +859,7 @@ impl Operation for PutOp { for subscriber in old_subscribers { conn_manager .send( - &subscriber.peer, + &subscriber.peer(), NetMessage::V1(NetMessageV1::Unsubscribed { transaction: Transaction::new::(), key: dropped_key, @@ -884,7 +886,7 @@ impl Operation for PutOp { } // Broadcast changes to subscribers - let broadcast_to = op_manager.get_broadcast_targets(&key, &sender.peer); + let broadcast_to = op_manager.get_broadcast_targets(&key, &sender.peer()); match try_to_broadcast( *id, last_hop, @@ -920,7 +922,7 @@ impl OpManager { .map(|subs| { subs.value() .iter() - .filter(|pk| &pk.peer != sender) + .filter(|pk| &pk.peer() != sender) .cloned() .collect::>() }) @@ -1169,13 +1171,13 @@ pub(crate) async fn request_put(op_manager: &OpManager, mut put_op: PutOp) -> Re // Find the optimal target for this contract let target = op_manager .ring - .closest_potentially_caching(&key, [&own_location.peer].as_slice()); + .closest_potentially_caching(&key, [&own_location.peer()].as_slice()); tracing::debug!( tx = %id, %key, target_found = target.is_some(), - target_peer = ?target.as_ref().map(|t| t.peer.to_string()), + target_peer = ?target.as_ref().map(|t| 
t.peer().to_string()), "Determined PUT routing target" ); @@ -1197,7 +1199,7 @@ pub(crate) async fn request_put(op_manager: &OpManager, mut put_op: PutOp) -> Re op_manager.ring.seed_contract(key); // Determine which peers need to be notified and broadcast the update - let broadcast_to = op_manager.get_broadcast_targets(&key, &own_location.peer); + let broadcast_to = op_manager.get_broadcast_targets(&key, &own_location.peer()); if broadcast_to.is_empty() { // No peers to broadcast to - operation complete @@ -1266,7 +1268,7 @@ pub(crate) async fn request_put(op_manager: &OpManager, mut put_op: PutOp) -> Re tracing::debug!( tx = %id, %key, - target_peer = %target_peer.peer, + target_peer = %target_peer.peer(), target_location = ?target_peer.location, "Caching state locally before forwarding PUT to target peer" ); @@ -1428,7 +1430,7 @@ where tracing::info!( tx = %id, %key, - target_peer = %peer.peer, + target_peer = %peer.peer(), target_location = %other_loc.0, target_distance = ?other_distance, self_distance = ?self_distance, @@ -1436,7 +1438,7 @@ where "Found potential forward target" ); - if peer.peer == own_pkloc.peer { + if peer.peer() == own_pkloc.peer() { tracing::info!( tx = %id, %key, @@ -1450,21 +1452,21 @@ where tracing::info!( tx = %id, %key, - target_peer = %peer.peer, + target_peer = %peer.peer(), "HTL exhausted - storing locally" ); return true; } let mut updated_skip_list = skip_list.clone(); - updated_skip_list.insert(own_pkloc.peer.clone()); + updated_skip_list.insert(own_pkloc.peer().clone()); if other_distance < self_distance { tracing::info!( tx = %id, %key, - from_peer = %own_pkloc.peer, - to_peer = %peer.peer, + from_peer = %own_pkloc.peer(), + to_peer = %peer.peer(), contract_location = %contract_loc.0, from_location = %own_loc.0, to_location = %other_loc.0, @@ -1475,8 +1477,8 @@ where tracing::info!( tx = %id, %key, - from_peer = %own_pkloc.peer, - to_peer = %peer.peer, + from_peer = %own_pkloc.peer(), + to_peer = %peer.peer(), contract_location = %contract_loc.0, from_location = %own_loc.0, to_location = %other_loc.0, @@ -1487,7 +1489,7 @@ where let _ = conn_manager .send( - &peer.peer, + &peer.peer(), (PutMsg::PutForward { id, sender: own_pkloc, diff --git a/crates/core/src/operations/subscribe.rs b/crates/core/src/operations/subscribe.rs index c8fab8952..547282991 100644 --- a/crates/core/src/operations/subscribe.rs +++ b/crates/core/src/operations/subscribe.rs @@ -29,7 +29,7 @@ fn subscribers_snapshot(op_manager: &OpManager, key: &ContractKey) -> Vec>() }) .unwrap_or_default() @@ -149,13 +149,13 @@ pub(crate) async fn request_subscribe( tracing::debug!( tx = %id, %key, - subscriber_peer = %own_loc.peer, + subscriber_peer = %own_loc.peer(), local_has_contract, "subscribe: request_subscribe invoked" ); let mut skip_list: HashSet = HashSet::new(); - skip_list.insert(own_loc.peer.clone()); + skip_list.insert(own_loc.peer().clone()); // Use k_closest_potentially_caching to try multiple candidates // Try up to 3 candidates @@ -170,7 +170,7 @@ pub(crate) async fn request_subscribe( .collect(); let candidate_display: Vec = candidates .iter() - .map(|cand| format!("{:.8}", cand.peer)) + .map(|cand| format!("{:.8}", cand.peer())) .collect(); tracing::info!( tx = %id, @@ -205,7 +205,7 @@ pub(crate) async fn request_subscribe( .map(|subs| { subs.value() .iter() - .map(|loc| format!("{:.8}", loc.peer)) + .map(|loc| format!("{:.8}", loc.peer())) .collect::>() }) .unwrap_or_default(); @@ -231,7 +231,7 @@ pub(crate) async fn request_subscribe( tracing::debug!( tx = %id, %key, - 
target_peer = %target.peer, + target_peer = %target.peer(), target_location = ?target.location, "subscribe: forwarding RequestSub to target peer" ); @@ -265,7 +265,7 @@ async fn complete_local_subscription( tracing::warn!( %key, tx = %id, - subscriber = %subscriber.peer, + subscriber = %subscriber.peer(), error = ?err, "Failed to register local subscriber" ); @@ -273,7 +273,7 @@ async fn complete_local_subscription( tracing::debug!( %key, tx = %id, - subscriber = %subscriber.peer, + subscriber = %subscriber.peer(), "Registered local subscriber" ); } @@ -328,7 +328,7 @@ impl Operation for SubscribeOp { ) -> Result, OpError> { let mut sender: Option = None; if let Some(peer_key_loc) = msg.sender().cloned() { - sender = Some(peer_key_loc.peer); + sender = Some(peer_key_loc.peer()); }; let id = *msg.id(); @@ -382,7 +382,7 @@ impl Operation for SubscribeOp { tracing::debug!( tx = %id, %key, - subscriber = %subscriber.peer, + subscriber = %subscriber.peer(), "subscribe: processing RequestSub" ); let own_loc = op_manager.ring.connection_manager.own_location(); @@ -406,7 +406,7 @@ impl Operation for SubscribeOp { tracing::info!( tx = %id, %key, - subscriber = %subscriber.peer, + subscriber = %subscriber.peer(), subscribers_before = ?before_direct, "subscribe: handling RequestSub locally (contract available)" ); @@ -419,7 +419,7 @@ impl Operation for SubscribeOp { tracing::warn!( tx = %id, %key, - subscriber = %subscriber.peer, + subscriber = %subscriber.peer(), subscribers_before = ?before_direct, "subscribe: direct registration failed (max subscribers reached)" ); @@ -439,12 +439,12 @@ impl Operation for SubscribeOp { tracing::info!( tx = %id, %key, - subscriber = %subscriber.peer, + subscriber = %subscriber.peer(), subscribers_after = ?after_direct, "subscribe: registered direct subscriber (RequestSub)" ); - if subscriber.peer == own_loc.peer { + if subscriber.peer() == own_loc.peer() { tracing::debug!( tx = %id, %key, @@ -484,18 +484,18 @@ impl Operation for SubscribeOp { } let mut skip = HashSet::new(); - skip.insert(subscriber.peer.clone()); - skip.insert(own_loc.peer.clone()); + skip.insert(subscriber.peer().clone()); + skip.insert(own_loc.peer().clone()); let forward_target = op_manager .ring .k_closest_potentially_caching(key, &skip, 3) .into_iter() - .find(|candidate| candidate.peer != own_loc.peer) + .find(|candidate| candidate.peer() != own_loc.peer()) .ok_or_else(|| RingError::NoCachingPeers(*key)) .map_err(OpError::from)?; - skip.insert(forward_target.peer.clone()); + skip.insert(forward_target.peer().clone()); new_state = self.state; return_msg = Some(SubscribeMsg::SeekNode { @@ -537,7 +537,7 @@ impl Operation for SubscribeOp { tracing::warn!( tx = %id, %key, - subscriber = %subscriber.peer, + subscriber = %subscriber.peer(), "Dropping Subscribe SeekNode with zero HTL" ); return Ok(return_not_subbed()); @@ -606,13 +606,13 @@ impl Operation for SubscribeOp { } let mut new_skip_list = skip_list.clone(); - new_skip_list.insert(target.peer.clone()); + new_skip_list.insert(target.peer().clone()); tracing::info!( tx = %id, %key, - new_target = %new_target.peer, - upstream = %subscriber.peer, + new_target = %new_target.peer(), + upstream = %subscriber.peer(), "Forward request to peer" ); tracing::debug!( @@ -650,7 +650,7 @@ impl Operation for SubscribeOp { tracing::info!( tx = %id, %key, - subscriber = %subscriber.peer, + subscriber = %subscriber.peer(), subscribers_before = ?before_direct, "subscribe: attempting to register direct subscriber" ); @@ -662,7 +662,7 @@ impl Operation for 
SubscribeOp { tracing::warn!( tx = %id, %key, - subscriber = %subscriber.peer, + subscriber = %subscriber.peer(), subscribers_before = ?before_direct, "subscribe: direct registration failed (max subscribers reached)" ); @@ -673,7 +673,7 @@ impl Operation for SubscribeOp { tracing::info!( tx = %id, %key, - subscriber = %subscriber.peer, + subscriber = %subscriber.peer(), subscribers_after = ?after_direct, "subscribe: registered direct subscriber" ); @@ -683,7 +683,7 @@ impl Operation for SubscribeOp { tracing::info!( tx = %id, %key, - subscriber = % subscriber.peer, + subscriber = % subscriber.peer(), "Peer successfully subscribed to contract", ); new_state = None; @@ -708,7 +708,7 @@ impl Operation for SubscribeOp { tracing::warn!( tx = %id, %key, - potential_provider = %sender.peer, + potential_provider = %sender.peer(), "Contract not found at potential subscription provider", ); // will error out in case it has reached max number of retries @@ -720,7 +720,7 @@ impl Operation for SubscribeOp { current_hop, }) => { if retries < MAX_RETRIES { - skip_list.insert(sender.peer.clone()); + skip_list.insert(sender.peer().clone()); // Use k_closest_potentially_caching to try multiple candidates let candidates = op_manager .ring @@ -773,8 +773,8 @@ impl Operation for SubscribeOp { tracing::info!( tx = %id, %key, - this_peer = %target.peer, - provider = %sender.peer, + this_peer = %target.peer(), + provider = %sender.peer(), "Subscribed to contract" ); tracing::info!( @@ -782,7 +782,7 @@ impl Operation for SubscribeOp { %key, upstream = upstream_subscriber .as_ref() - .map(|loc| format!("{:.8}", loc.peer)) + .map(|loc| format!("{:.8}", loc.peer())) .unwrap_or_else(|| "".into()), "Handling ReturnSub (subscribed=true)" ); @@ -791,7 +791,7 @@ impl Operation for SubscribeOp { tracing::info!( tx = %id, %key, - upstream = %upstream_subscriber.peer, + upstream = %upstream_subscriber.peer(), subscribers_before = ?before_upstream, "subscribe: attempting to register upstream link" ); @@ -803,7 +803,7 @@ impl Operation for SubscribeOp { tracing::warn!( tx = %id, %key, - upstream = %upstream_subscriber.peer, + upstream = %upstream_subscriber.peer(), subscribers_before = ?before_upstream, "subscribe: upstream registration failed (max subscribers reached)" ); @@ -812,7 +812,7 @@ impl Operation for SubscribeOp { tracing::info!( tx = %id, %key, - upstream = %upstream_subscriber.peer, + upstream = %upstream_subscriber.peer(), subscribers_after = ?after_upstream, "subscribe: registered upstream link" ); @@ -823,7 +823,7 @@ impl Operation for SubscribeOp { tracing::info!( tx = %id, %key, - provider = %sender.peer, + provider = %sender.peer(), subscribers_before = ?before_provider, "subscribe: registering provider/subscription source" ); @@ -840,7 +840,7 @@ impl Operation for SubscribeOp { tracing::info!( tx = %id, %key, - provider = %sender.peer, + provider = %sender.peer(), subscribers_after = ?after_provider, "subscribe: registered provider/subscription source" ); @@ -850,7 +850,7 @@ impl Operation for SubscribeOp { tracing::debug!( tx = %id, %key, - upstream_subscriber = %upstream_subscriber.peer, + upstream_subscriber = %upstream_subscriber.peer(), "Forwarding subscription to upstream subscriber" ); return_msg = Some(SubscribeMsg::ReturnSub { diff --git a/crates/core/src/operations/subscribe/tests.rs b/crates/core/src/operations/subscribe/tests.rs index af8c3dfad..abca9f655 100644 --- a/crates/core/src/operations/subscribe/tests.rs +++ b/crates/core/src/operations/subscribe/tests.rs @@ -8,6 +8,13 @@ use crate::{ 
use freenet_stdlib::prelude::{ContractInstanceId, ContractKey}; use std::collections::HashSet; +/// Helper to create PeerKeyLocation with a random peer and specific location +fn random_peer_at(loc: f64) -> PeerKeyLocation { + let mut pkl = PeerKeyLocation::from(PeerId::random()); + pkl.location = Some(Location::try_from(loc).unwrap()); + pkl +} + /// TestRing implements only the methods used by subscription routing #[allow(clippy::type_complexity)] struct TestRing { @@ -21,7 +28,7 @@ impl TestRing { Self { k_closest_calls: std::sync::Arc::new(tokio::sync::Mutex::new(Vec::new())), candidates, - own_peer: own_location.peer, + own_peer: own_location.peer(), } } @@ -35,8 +42,8 @@ impl TestRing { let mut skip_vec: Vec = self .candidates .iter() - .filter(|peer| skip_list.has_element(peer.peer.clone())) - .map(|peer| peer.peer.clone()) + .filter(|peer| skip_list.has_element(peer.peer().clone())) + .map(|peer| peer.peer().clone()) .collect(); if skip_list.has_element(self.own_peer.clone()) // avoid duplicates if own peer also in candidates @@ -51,7 +58,7 @@ impl TestRing { // Return candidates not in skip list self.candidates .iter() - .filter(|peer| !skip_list.has_element(peer.peer.clone())) + .filter(|peer| !skip_list.has_element(peer.peer().clone())) .take(k) .cloned() .collect() @@ -64,23 +71,15 @@ impl TestRing { async fn test_subscription_routing_calls_k_closest_with_skip_list() { let contract_key = ContractKey::from(ContractInstanceId::new([10u8; 32])); - // Create test peers - let peer1 = PeerKeyLocation { - peer: PeerId::random(), - location: Some(Location::try_from(0.1).unwrap()), - }; - let peer2 = PeerKeyLocation { - peer: PeerId::random(), - location: Some(Location::try_from(0.2).unwrap()), - }; - let peer3 = PeerKeyLocation { - peer: PeerId::random(), - location: Some(Location::try_from(0.3).unwrap()), - }; - let own_location = PeerKeyLocation { - peer: PeerId::random(), - location: Some(Location::try_from(0.5).unwrap()), - }; + // Create test peers using From and setting specific locations + let mut peer1 = PeerKeyLocation::from(PeerId::random()); + peer1.location = Some(Location::try_from(0.1).unwrap()); + let mut peer2 = PeerKeyLocation::from(PeerId::random()); + peer2.location = Some(Location::try_from(0.2).unwrap()); + let mut peer3 = PeerKeyLocation::from(PeerId::random()); + peer3.location = Some(Location::try_from(0.3).unwrap()); + let mut own_location = PeerKeyLocation::from(PeerId::random()); + own_location.location = Some(Location::try_from(0.5).unwrap()); // Create TestRing with multiple candidates let test_ring = TestRing::new( @@ -97,7 +96,7 @@ async fn test_subscription_routing_calls_k_closest_with_skip_list() { // 2. Test k_closest_potentially_caching with initial skip list containing self (simulates request_subscribe call) let mut initial_skip = HashSet::new(); - initial_skip.insert(own_location.peer.clone()); + initial_skip.insert(own_location.peer().clone()); let initial_candidates = test_ring .k_closest_potentially_caching(&contract_key, &initial_skip, 3) .await; @@ -119,7 +118,8 @@ async fn test_subscription_routing_calls_k_closest_with_skip_list() { "Initial call should only skip own peer" ); assert_eq!( - k_closest_calls[0].1[0], own_location.peer, + k_closest_calls[0].1[0], + own_location.peer(), "Initial skip list should contain own peer" ); assert_eq!(k_closest_calls[0].2, 3, "Should request 3 candidates"); @@ -135,7 +135,7 @@ async fn test_subscription_routing_calls_k_closest_with_skip_list() { // 5. 
Test retry with skip list (simulates ReturnSub handler) let mut skip_list = HashSet::new(); - skip_list.insert(peer1.peer.clone()); // First peer failed + skip_list.insert(peer1.peer().clone()); // First peer failed // This is the critical call that would happen in the ReturnSub handler let candidates_after_failure = test_ring @@ -159,7 +159,8 @@ async fn test_subscription_routing_calls_k_closest_with_skip_list() { "Skip list should contain 1 failed peer" ); assert_eq!( - k_closest_calls[1].1[0], peer1.peer, + k_closest_calls[1].1[0], + peer1.peer(), "Skip list should contain the failed peer" ); assert_eq!(k_closest_calls[1].2, 3, "Should still request 3 candidates"); @@ -169,7 +170,7 @@ async fn test_subscription_routing_calls_k_closest_with_skip_list() { assert!( !candidates_after_failure .iter() - .any(|p| p.peer == peer1.peer), + .any(|p| p.peer() == peer1.peer()), "Failed peer should be excluded from candidates" ); assert!( @@ -182,7 +183,7 @@ async fn test_subscription_routing_calls_k_closest_with_skip_list() { ); // 8. Test multiple failures - skip_list.insert(peer2.peer.clone()); // Second peer also failed + skip_list.insert(peer2.peer().clone()); // Second peer also failed let final_candidates = test_ring .k_closest_potentially_caching(&contract_key, &skip_list, 3) .await; @@ -213,7 +214,7 @@ async fn test_subscription_routing_calls_k_closest_with_skip_list() { assert!( !final_candidates .iter() - .any(|p| p.peer == peer1.peer || p.peer == peer2.peer), + .any(|p| p.peer() == peer1.peer() || p.peer() == peer2.peer()), "Failed peers should be excluded" ); @@ -235,23 +236,15 @@ async fn test_subscription_routing_calls_k_closest_with_skip_list() { async fn test_subscription_production_code_paths_use_k_closest() { let contract_key = ContractKey::from(ContractInstanceId::new([11u8; 32])); - // Create test peers - let peer1 = PeerKeyLocation { - peer: PeerId::random(), - location: Some(Location::try_from(0.1).unwrap()), - }; - let peer2 = PeerKeyLocation { - peer: PeerId::random(), - location: Some(Location::try_from(0.2).unwrap()), - }; - let peer3 = PeerKeyLocation { - peer: PeerId::random(), - location: Some(Location::try_from(0.3).unwrap()), - }; - let own_location = PeerKeyLocation { - peer: PeerId::random(), - location: Some(Location::try_from(0.5).unwrap()), - }; + // Create test peers using From and setting specific locations + let mut peer1 = PeerKeyLocation::from(PeerId::random()); + peer1.location = Some(Location::try_from(0.1).unwrap()); + let mut peer2 = PeerKeyLocation::from(PeerId::random()); + peer2.location = Some(Location::try_from(0.2).unwrap()); + let mut peer3 = PeerKeyLocation::from(PeerId::random()); + peer3.location = Some(Location::try_from(0.3).unwrap()); + let mut own_location = PeerKeyLocation::from(PeerId::random()); + own_location.location = Some(Location::try_from(0.5).unwrap()); // Create TestRing that records all k_closest_potentially_caching calls let test_ring = TestRing::new( @@ -269,7 +262,7 @@ async fn test_subscription_production_code_paths_use_k_closest() { // Test 2: Simulate the k_closest_potentially_caching call made in request_subscribe // (Line 72 in subscribe.rs: op_manager.ring.k_closest_potentially_caching(key, skip_list, 3)) let mut initial_skip = HashSet::new(); - initial_skip.insert(own_location.peer.clone()); + initial_skip.insert(own_location.peer().clone()); let initial_candidates = test_ring .k_closest_potentially_caching(&contract_key, &initial_skip, 3) .await; @@ -291,7 +284,8 @@ async fn 
test_subscription_production_code_paths_use_k_closest() { "Should skip own peer initially" ); assert_eq!( - k_closest_calls[0].1[0], own_location.peer, + k_closest_calls[0].1[0], + own_location.peer(), "Skip list should contain own peer" ); assert_eq!(k_closest_calls[0].2, 3, "Should request 3 candidates"); @@ -307,7 +301,7 @@ async fn test_subscription_production_code_paths_use_k_closest() { // Test 3: Simulate the k_closest_potentially_caching call made in SeekNode handler // (Line 241 in subscribe.rs: op_manager.ring.k_closest_potentially_caching(key, skip_list, 3)) let mut skip_list = HashSet::new(); - skip_list.insert(peer1.peer.clone()); + skip_list.insert(peer1.peer().clone()); let seek_candidates = test_ring .k_closest_potentially_caching(&contract_key, &skip_list, 3) .await; @@ -325,7 +319,8 @@ async fn test_subscription_production_code_paths_use_k_closest() { "Should include failed peer in skip list" ); assert_eq!( - k_closest_calls[1].1[0], peer1.peer, + k_closest_calls[1].1[0], + peer1.peer(), "Should skip the failed peer" ); assert_eq!(k_closest_calls[1].2, 3, "Should still request 3 candidates"); @@ -333,7 +328,7 @@ async fn test_subscription_production_code_paths_use_k_closest() { // Verify failed peer is excluded from results assert!( - !seek_candidates.iter().any(|p| p.peer == peer1.peer), + !seek_candidates.iter().any(|p| p.peer() == peer1.peer()), "Should exclude failed peer" ); assert_eq!( @@ -345,7 +340,7 @@ async fn test_subscription_production_code_paths_use_k_closest() { // Test 4: Simulate the k_closest_potentially_caching call made in ReturnSub(false) handler // (Line 336 in subscribe.rs: op_manager.ring.k_closest_potentially_caching(key, &skip_list, 3)) - skip_list.insert(peer2.peer.clone()); // Second peer also failed + skip_list.insert(peer2.peer().clone()); // Second peer also failed let retry_candidates = test_ring .k_closest_potentially_caching(&contract_key, &skip_list, 3) .await; @@ -363,11 +358,11 @@ async fn test_subscription_production_code_paths_use_k_closest() { "Should include both failed peers in skip list" ); assert!( - k_closest_calls[2].1.contains(&peer1.peer), + k_closest_calls[2].1.contains(&peer1.peer()), "Should skip peer1" ); assert!( - k_closest_calls[2].1.contains(&peer2.peer), + k_closest_calls[2].1.contains(&peer2.peer()), "Should skip peer2" ); assert_eq!(k_closest_calls[2].2, 3, "Should still request 3 candidates"); @@ -377,7 +372,7 @@ async fn test_subscription_production_code_paths_use_k_closest() { assert!( !retry_candidates .iter() - .any(|p| p.peer == peer1.peer || p.peer == peer2.peer), + .any(|p| p.peer() == peer1.peer() || p.peer() == peer2.peer()), "Should exclude both failed peers" ); assert_eq!(retry_candidates.len(), 1, "Should return final 1 candidate"); @@ -416,23 +411,11 @@ async fn test_subscription_validates_k_closest_usage() { // Create TestRing that records all k_closest calls let test_ring = TestRing::new( vec![ - PeerKeyLocation { - peer: PeerId::random(), - location: Some(Location::try_from(0.1).unwrap()), - }, - PeerKeyLocation { - peer: PeerId::random(), - location: Some(Location::try_from(0.2).unwrap()), - }, - PeerKeyLocation { - peer: PeerId::random(), - location: Some(Location::try_from(0.3).unwrap()), - }, + random_peer_at(0.1), + random_peer_at(0.2), + random_peer_at(0.3), ], - PeerKeyLocation { - peer: PeerId::random(), - location: Some(Location::try_from(0.5).unwrap()), - }, + random_peer_at(0.5), ); // Test 1: Validate the exact call pattern from request_subscribe (line 72) @@ -463,7 +446,7 @@ async fn 
test_subscription_validates_k_closest_usage() { { test_ring.k_closest_calls.lock().await.clear(); - let failed_peer = test_ring.candidates[0].peer.clone(); + let failed_peer = test_ring.candidates[0].peer().clone(); let skip_list = [failed_peer.clone()]; let candidates = test_ring @@ -480,7 +463,7 @@ async fn test_subscription_validates_k_closest_usage() { // Verify failed peer is excluded from candidates assert!( - !candidates.iter().any(|c| c.peer == failed_peer), + !candidates.iter().any(|c| c.peer() == failed_peer), "Failed peer must be excluded" ); } diff --git a/crates/core/src/operations/update.rs b/crates/core/src/operations/update.rs index 7743ce95b..25ad178b1 100644 --- a/crates/core/src/operations/update.rs +++ b/crates/core/src/operations/update.rs @@ -91,7 +91,7 @@ impl Operation for UpdateOp { ) -> Result, OpError> { let mut sender: Option = None; if let Some(peer_key_loc) = msg.sender().cloned() { - sender = Some(peer_key_loc.peer); + sender = Some(peer_key_loc.peer()); }; let tx = *msg.id(); match op_manager.pop(msg.id()) { @@ -154,23 +154,23 @@ impl Operation for UpdateOp { tracing::debug!( tx = %id, %key, - executing_peer = %self_location.peer, - request_sender = %request_sender.peer, - target_peer = %target.peer, + executing_peer = %self_location.peer(), + request_sender = %request_sender.peer(), + target_peer = %target.peer(), "UPDATE RequestUpdate: executing_peer={} received request from={} targeting={}", - self_location.peer, - request_sender.peer, - target.peer + self_location.peer(), + request_sender.peer(), + target.peer() ); // If target is not us, this message is meant for another peer // This can happen when the initiator processes its own message before sending to network - if target.peer != self_location.peer { + if target.peer() != self_location.peer() { tracing::debug!( tx = %id, %key, - our_peer = %self_location.peer, - target_peer = %target.peer, + our_peer = %self_location.peer(), + target_peer = %target.peer(), "RequestUpdate target is not us - message will be routed to target" ); // Keep current state, message will be sent to target peer via network @@ -249,7 +249,7 @@ impl Operation for UpdateOp { } else { // Get broadcast targets for propagating UPDATE to subscribers let broadcast_to = op_manager - .get_broadcast_targets_update(key, &request_sender.peer); + .get_broadcast_targets_update(key, &request_sender.peer()); if broadcast_to.is_empty() { tracing::debug!( @@ -294,14 +294,14 @@ impl Operation for UpdateOp { // Contract not found locally - forward to another peer let next_target = op_manager.ring.closest_potentially_caching( key, - [&self_location.peer, &request_sender.peer].as_slice(), + [&self_location.peer(), &request_sender.peer()].as_slice(), ); if let Some(forward_target) = next_target { tracing::debug!( tx = %id, %key, - next_peer = %forward_target.peer, + next_peer = %forward_target.peer(), "Forwarding UPDATE to peer that might have contract" ); @@ -317,14 +317,14 @@ impl Operation for UpdateOp { new_state = None; } else { // No peers available and we don't have the contract - capture context - let skip_list = [&self_location.peer, &request_sender.peer]; + let skip_list = [&self_location.peer(), &request_sender.peer()]; let subscribers = op_manager .ring .subscribers_of(key) .map(|subs| { subs.value() .iter() - .map(|loc| format!("{:.8}", loc.peer)) + .map(|loc| format!("{:.8}", loc.peer())) .collect::>() }) .unwrap_or_default(); @@ -332,7 +332,7 @@ impl Operation for UpdateOp { .ring .k_closest_potentially_caching(key, 
skip_list.as_slice(), 5) .into_iter() - .map(|loc| format!("{:.8}", loc.peer)) + .map(|loc| format!("{:.8}", loc.peer())) .collect::>(); let connection_count = op_manager.ring.connection_manager.num_connections(); @@ -342,7 +342,7 @@ impl Operation for UpdateOp { subscribers = ?subscribers, candidates = ?candidates, connection_count, - request_sender = %request_sender.peer, + request_sender = %request_sender.peer(), "Cannot handle UPDATE: contract not found locally and no peers to forward to" ); return Err(OpError::RingError(RingError::NoCachingPeers(*key))); @@ -410,7 +410,7 @@ impl Operation for UpdateOp { } else { // Get broadcast targets let broadcast_to = - op_manager.get_broadcast_targets_update(key, &sender.peer); + op_manager.get_broadcast_targets_update(key, &sender.peer()); // If no peers to broadcast to, nothing else to do if broadcast_to.is_empty() { @@ -448,14 +448,14 @@ impl Operation for UpdateOp { let self_location = op_manager.ring.connection_manager.own_location(); let next_target = op_manager.ring.closest_potentially_caching( key, - [&sender.peer, &self_location.peer].as_slice(), + [&sender.peer(), &self_location.peer()].as_slice(), ); if let Some(forward_target) = next_target { tracing::debug!( tx = %id, %key, - next_peer = %forward_target.peer, + next_peer = %forward_target.peer(), "Contract not found - forwarding SeekNode to next peer" ); @@ -471,14 +471,14 @@ impl Operation for UpdateOp { new_state = None; } else { // No more peers to try - capture context for diagnostics - let skip_list = [&sender.peer, &self_location.peer]; + let skip_list = [&sender.peer(), &self_location.peer()]; let subscribers = op_manager .ring .subscribers_of(key) .map(|subs| { subs.value() .iter() - .map(|loc| format!("{:.8}", loc.peer)) + .map(|loc| format!("{:.8}", loc.peer())) .collect::>() }) .unwrap_or_default(); @@ -486,7 +486,7 @@ impl Operation for UpdateOp { .ring .k_closest_potentially_caching(key, skip_list.as_slice(), 5) .into_iter() - .map(|loc| format!("{:.8}", loc.peer)) + .map(|loc| format!("{:.8}", loc.peer())) .collect::>(); let connection_count = op_manager.ring.connection_manager.num_connections(); @@ -496,7 +496,7 @@ impl Operation for UpdateOp { subscribers = ?subscribers, candidates = ?candidates, connection_count, - sender = %sender.peer, + sender = %sender.peer(), "Cannot handle UPDATE SeekNode: contract not found and no peers to forward to" ); return Err(OpError::RingError(RingError::NoCachingPeers(*key))); @@ -534,7 +534,7 @@ impl Operation for UpdateOp { return_msg = None; } else { let broadcast_to = - op_manager.get_broadcast_targets_update(key, &sender.peer); + op_manager.get_broadcast_targets_update(key, &sender.peer()); tracing::debug!( "Successfully updated a value for contract {} @ {:?} - BroadcastTo - update", @@ -574,9 +574,11 @@ impl Operation for UpdateOp { let sender = op_manager.ring.connection_manager.own_location(); let mut broadcasted_to = *broadcasted_to; + // Collect peer_ids first to ensure they outlive the futures + let peer_ids: Vec<_> = broadcast_to.iter().map(|p| p.peer()).collect(); let mut broadcasting = Vec::with_capacity(broadcast_to.len()); - for peer in broadcast_to.iter() { + for (peer, peer_id) in broadcast_to.iter().zip(peer_ids.iter()) { let msg = UpdateMsg::BroadcastTo { id: *id, key: *key, @@ -584,7 +586,7 @@ impl Operation for UpdateOp { sender: sender.clone(), target: peer.clone(), }; - let f = conn_manager.send(&peer.peer, msg.into()); + let f = conn_manager.send(peer_id, msg.into()); broadcasting.push(f); } let error_futures = 
futures::future::join_all(broadcasting) @@ -605,11 +607,11 @@ impl Operation for UpdateOp { let peer = broadcast_to.get(peer_num).unwrap(); tracing::warn!( "failed broadcasting update change to {} with error {}; dropping connection", - peer.peer, + peer.peer(), err ); // TODO: review this, maybe we should just dropping this subscription - conn_manager.drop_connection(&peer.peer).await?; + conn_manager.drop_connection(&peer.peer()).await?; incorrect_results += 1; } @@ -702,8 +704,8 @@ impl OpManager { .filter(|pk| { // Allow the sender (or ourselves) to stay in the broadcast list when we're // originating the UPDATE so local auto-subscribes still receive events. - let is_sender = &pk.peer == sender; - let is_self = self_peer.as_ref() == Some(&pk.peer); + let is_sender = &pk.peer() == sender; + let is_self = self_peer.as_ref() == Some(&pk.peer()); if is_sender || is_self { allow_self } else { @@ -723,7 +725,7 @@ impl OpManager { sender, subscribers .iter() - .map(|s| format!("{:.8}", s.peer)) + .map(|s| format!("{:.8}", s.peer())) .collect::>() .join(","), subscribers.len() @@ -735,7 +737,7 @@ impl OpManager { .ring .k_closest_potentially_caching(key, skip_slice, 5) .into_iter() - .map(|candidate| format!("{:.8}", candidate.peer)) + .map(|candidate| format!("{:.8}", candidate.peer())) .collect::>(); tracing::warn!( @@ -964,7 +966,7 @@ pub(crate) async fn request_update( // Clone and filter out self from subscribers to prevent self-targeting let mut filtered_subscribers: Vec<_> = subscribers .iter() - .filter(|sub| sub.peer != sender.peer) + .filter(|sub| sub.peer() != sender.peer()) .cloned() .collect(); filtered_subscribers.pop() @@ -978,7 +980,7 @@ pub(crate) async fn request_update( // Find the best peer to send the update to let remote_target = op_manager .ring - .closest_potentially_caching(&key, [sender.peer.clone()].as_slice()); + .closest_potentially_caching(&key, [sender.peer().clone()].as_slice()); if let Some(target) = remote_target { // Subscribe to the contract @@ -1038,7 +1040,7 @@ pub(crate) async fn request_update( } // Check if there are any subscribers to broadcast to - let broadcast_to = op_manager.get_broadcast_targets_update(&key, &sender.peer); + let broadcast_to = op_manager.get_broadcast_targets_update(&key, &sender.peer()); deliver_update_result(op_manager, id, key, summary.clone()).await?; @@ -1101,7 +1103,7 @@ pub(crate) async fn request_update( tracing::debug!( tx = %id, %key, - target_peer = %target.peer, + target_peer = %target.peer(), "Applying UPDATE locally before forwarding to target peer" ); diff --git a/crates/core/src/ring/connection_manager.rs b/crates/core/src/ring/connection_manager.rs index e04ac180f..711f14e84 100644 --- a/crates/core/src/ring/connection_manager.rs +++ b/crates/core/src/ring/connection_manager.rs @@ -291,7 +291,9 @@ impl ConnectionManager { Some(Location::new(location)) }; let peer = self.get_peer_key().expect("peer key not set"); - PeerKeyLocation { peer, location } + let mut pkl = PeerKeyLocation::from(peer); + pkl.location = location; + pkl } pub fn get_peer_key(&self) -> Option { @@ -376,7 +378,7 @@ impl ConnectionManager { ); let mut cbl = self.connections_by_location.write(); if let Some(prev_list) = cbl.get_mut(&prev_loc) { - if let Some(pos) = prev_list.iter().position(|c| c.location.peer == peer) { + if let Some(pos) = prev_list.iter().position(|c| c.location.peer() == peer) { prev_list.swap_remove(pos); } if prev_list.is_empty() { @@ -388,10 +390,7 @@ impl ConnectionManager { { let mut cbl = 
self.connections_by_location.write(); cbl.entry(loc).or_default().push(Connection { - location: PeerKeyLocation { - peer: peer.clone(), - location: Some(loc), - }, + location: PeerKeyLocation::with_location(peer.pub_key.clone(), peer.addr, loc), }); } } @@ -420,19 +419,22 @@ impl ConnectionManager { let entry = cbl.entry(loc).or_default(); if let Some(conn) = entry .iter_mut() - .find(|conn| conn.location.peer == *old_peer) + .find(|conn| conn.location.peer() == *old_peer) { - conn.location.peer = new_peer; + // Update the public key and address to match the new peer + conn.location.pub_key = new_peer.pub_key.clone(); + conn.location.set_addr(new_peer.addr); } else { tracing::warn!( %old_peer, "update_peer_identity: connection entry missing; creating placeholder" ); entry.push(Connection { - location: PeerKeyLocation { - peer: new_peer, - location: Some(loc), - }, + location: PeerKeyLocation::with_location( + new_peer.pub_key.clone(), + new_peer.addr, + loc, + ), }); } @@ -463,7 +465,7 @@ impl ConnectionManager { let conns = &mut *self.connections_by_location.write(); if let Some(conns) = conns.get_mut(&loc) { - if let Some(pos) = conns.iter().position(|c| &c.location.peer == peer) { + if let Some(pos) = conns.iter().position(|c| &c.location.peer() == peer) { conns.swap_remove(pos); } } @@ -536,15 +538,15 @@ impl ConnectionManager { .values() .filter_map(|conns| { let conn = conns.choose(&mut rand::rng())?; - if self.is_transient(&conn.location.peer) { + if self.is_transient(&conn.location.peer()) { return None; } if let Some(requester) = requesting { - if requester == &conn.location.peer { + if requester == &conn.location.peer() { return None; } } - (!skip_list.has_element(conn.location.peer.clone())) + (!skip_list.has_element(conn.location.peer().clone())) .then_some(conn.location.clone()) }) .collect(); diff --git a/crates/core/src/ring/mod.rs b/crates/core/src/ring/mod.rs index 6b2bf4a7e..e9e856ead 100644 --- a/crates/core/src/ring/mod.rs +++ b/crates/core/src/ring/mod.rs @@ -261,7 +261,7 @@ impl Ring { for peer in peers { if !self .connection_manager - .has_connection_or_pending(&peer.peer) + .has_connection_or_pending(&peer.peer()) { filtered.push(peer); } @@ -299,7 +299,7 @@ impl Ring { let connections = self.connection_manager.get_connections_by_location(); for conns in connections.values() { for conn in conns { - let peer = conn.location.peer.clone(); + let peer = conn.location.peer().clone(); if skip_list.has_element(peer.clone()) || !seen.insert(peer) { continue; } @@ -313,10 +313,11 @@ impl Ring { if skip_list.has_element(peer.clone()) || !seen.insert(peer.clone()) { continue; } - candidates.push(PeerKeyLocation { - peer, - location: Some(location), - }); + candidates.push(PeerKeyLocation::with_location( + peer.pub_key.clone(), + peer.addr, + location, + )); if candidates.len() >= k { break; } @@ -485,7 +486,7 @@ impl Ring { .map(|(loc, conns)| { let conns: Vec<_> = conns .iter() - .filter(|conn| !live_tx_tracker.has_live_connection(&conn.location.peer)) + .filter(|conn| !live_tx_tracker.has_live_connection(&conn.location.peer())) .cloned() .collect(); (*loc, conns) @@ -574,7 +575,7 @@ impl Ring { notifier .notifications_sender .send(Either::Right(crate::message::NodeEvent::DropConnection( - peer.peer, + peer.peer(), ))) .await .map_err(|error| { @@ -649,7 +650,7 @@ impl Ring { let joiner = self.connection_manager.own_location(); tracing::info!( this_peer = %joiner, - query_target_peer = %query_target.peer, + query_target_peer = %query_target.peer(), %ideal_location, "Sending 
connect request via connection_maintenance" ); @@ -665,7 +666,7 @@ impl Ring { op_manager.connect_forward_estimator.clone(), ); - live_tx_tracker.add_transaction(query_target.peer.clone(), tx); + live_tx_tracker.add_transaction(query_target.peer().clone(), tx); op_manager .push(tx, OpEnum::Connect(Box::new(op))) .await diff --git a/crates/core/src/ring/peer_key_location.rs b/crates/core/src/ring/peer_key_location.rs index 75db9bbe9..115b4d9e8 100644 --- a/crates/core/src/ring/peer_key_location.rs +++ b/crates/core/src/ring/peer_key_location.rs @@ -1,4 +1,5 @@ use super::Location; +#[allow(unused_imports)] // PeerId still used in some conversions during migration use crate::node::PeerId; use crate::transport::TransportPublicKey; use serde::{Deserialize, Serialize}; @@ -57,21 +58,43 @@ impl std::fmt::Display for PeerAddr { } } -#[derive(Serialize, Deserialize, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -#[cfg_attr(test, derive(arbitrary::Arbitrary))] /// The location of a peer in the ring. This location allows routing towards the peer. +/// +/// # Identity vs Address +/// - `pub_key` is the true peer identity (cryptographic public key) +/// - `peer_addr` represents the network address, which may be unknown initially +/// +/// # Location +/// The `location` field is kept for compatibility but should eventually be computed +/// from the address using `Location::from_address()`. +#[derive(Serialize, Deserialize, Clone, PartialEq, Eq, Hash)] pub struct PeerKeyLocation { - pub peer: PeerId, - // TODO: this shouldn't e an option, when we are using this struct the location is always known - /// An unspecified location means that the peer hasn't been asigned a location, yet. + /// The peer's cryptographic identity (public key). + pub pub_key: TransportPublicKey, + /// The peer's network address. May be Unknown if the peer doesn't know + /// their own external address (e.g., behind NAT). + pub peer_addr: PeerAddr, + /// An unspecified location means that the peer hasn't been assigned a location, yet. + /// Eventually this should be computed from addr instead of stored. pub location: Option, } impl PeerKeyLocation { - /// Creates a new PeerKeyLocation with the given public key and address. + /// Creates a new PeerKeyLocation with the given public key and known address. pub fn new(pub_key: TransportPublicKey, addr: SocketAddr) -> Self { PeerKeyLocation { - peer: PeerId::new(addr, pub_key), + pub_key, + peer_addr: PeerAddr::Known(addr), + location: None, + } + } + + /// Creates a new PeerKeyLocation with unknown address. + /// Used when a peer doesn't know their own external address (e.g., behind NAT). + pub fn with_unknown_addr(pub_key: TransportPublicKey) -> Self { + PeerKeyLocation { + pub_key, + peer_addr: PeerAddr::Unknown, location: None, } } @@ -83,27 +106,89 @@ impl PeerKeyLocation { location: Location, ) -> Self { PeerKeyLocation { - peer: PeerId::new(addr, pub_key), + pub_key, + peer_addr: PeerAddr::Known(addr), location: Some(location), } } - /// Returns the peer's public key (identity). + /// Returns a reference to the peer's public key (identity). #[inline] pub fn pub_key(&self) -> &TransportPublicKey { - &self.peer.pub_key + &self.pub_key } /// Returns the peer's socket address. + /// + /// # Panics + /// Panics if the address is unknown. Use `socket_addr()` for a safe version. 
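+    ///
+    /// A minimal sketch of the intended call pattern (the `pkl` binding is a
+    /// placeholder, not part of this patch):
+    ///
+    /// ```ignore
+    /// match pkl.socket_addr() {
+    ///     Some(addr) => println!("peer reachable at {addr}"),
+    ///     None => { /* PeerAddr::Unknown - pkl.addr() would panic here */ }
+    /// }
+    /// ```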
#[inline] pub fn addr(&self) -> SocketAddr { - self.peer.addr + match &self.peer_addr { + PeerAddr::Known(addr) => *addr, + PeerAddr::Unknown => panic!( + "addr() called on PeerKeyLocation with unknown address; use socket_addr() instead" + ), + } + } + + /// Returns the peer's socket address if known, None otherwise. + #[inline] + pub fn socket_addr(&self) -> Option { + match &self.peer_addr { + PeerAddr::Known(addr) => Some(*addr), + PeerAddr::Unknown => None, + } + } + + /// Returns the peer as a PeerId for compatibility with existing code. + /// + /// # Panics + /// Panics if the address is unknown. + #[inline] + pub fn peer(&self) -> PeerId { + PeerId::new(self.addr(), self.pub_key.clone()) + } + + /// Computes the ring location from the address if known. + pub fn computed_location(&self) -> Option { + match &self.peer_addr { + PeerAddr::Known(addr) => Some(Location::from_address(addr)), + PeerAddr::Unknown => None, + } + } + + /// Sets the address from a known socket address. + /// Used when the first recipient fills in the sender's address from packet source. + pub fn set_addr(&mut self, addr: SocketAddr) { + self.peer_addr = PeerAddr::Known(addr); + } + + /// Creates a PeerId from this PeerKeyLocation. + /// Returns None if the address is unknown. + #[allow(dead_code)] // Will be removed once PeerId is deprecated + pub fn to_peer_id(&self) -> Option { + match &self.peer_addr { + PeerAddr::Known(addr) => Some(PeerId::new(*addr, self.pub_key.clone())), + PeerAddr::Unknown => None, + } } #[cfg(test)] pub fn random() -> Self { + use crate::transport::TransportKeypair; + use rand::Rng; + + let mut addr_bytes = [0u8; 4]; + rand::rng().fill(&mut addr_bytes[..]); + let port = crate::util::get_free_port().unwrap(); + let addr = SocketAddr::from((addr_bytes, port)); + + let pub_key = TransportKeypair::new().public().clone(); + PeerKeyLocation { - peer: PeerId::random(), + pub_key, + peer_addr: PeerAddr::Known(addr), location: Some(Location::random()), } } @@ -112,12 +197,31 @@ impl PeerKeyLocation { impl From for PeerKeyLocation { fn from(peer: PeerId) -> Self { PeerKeyLocation { - peer, + pub_key: peer.pub_key, + peer_addr: PeerAddr::Known(peer.addr), location: None, } } } +impl Ord for PeerKeyLocation { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + // Order by address when both are known, otherwise order unknown addresses last + match (&self.peer_addr, &other.peer_addr) { + (PeerAddr::Known(a), PeerAddr::Known(b)) => a.cmp(b), + (PeerAddr::Known(_), PeerAddr::Unknown) => std::cmp::Ordering::Less, + (PeerAddr::Unknown, PeerAddr::Known(_)) => std::cmp::Ordering::Greater, + (PeerAddr::Unknown, PeerAddr::Unknown) => std::cmp::Ordering::Equal, + } + } +} + +impl PartialOrd for PeerKeyLocation { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + impl std::fmt::Debug for PeerKeyLocation { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { ::fmt(self, f) @@ -127,8 +231,21 @@ impl std::fmt::Debug for PeerKeyLocation { impl Display for PeerKeyLocation { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self.location { - Some(loc) => write!(f, "{} (@ {loc})", self.peer), - None => write!(f, "{}", self.peer), + Some(loc) => write!(f, "{:?}@{} (@ {loc})", self.pub_key, self.peer_addr), + None => write!(f, "{:?}@{}", self.pub_key, self.peer_addr), } } } + +#[cfg(test)] +impl<'a> arbitrary::Arbitrary<'a> for PeerKeyLocation { + fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { + let peer_id: PeerId 
= u.arbitrary()?; + let location: Option = u.arbitrary()?; + Ok(PeerKeyLocation { + pub_key: peer_id.pub_key, + peer_addr: PeerAddr::Known(peer_id.addr), + location, + }) + } +} diff --git a/crates/core/src/ring/seeding.rs b/crates/core/src/ring/seeding.rs index 3474b542a..5b3c940b8 100644 --- a/crates/core/src/ring/seeding.rs +++ b/crates/core/src/ring/seeding.rs @@ -113,11 +113,11 @@ impl SeedingManager { .or_insert(Vec::with_capacity(Self::TOTAL_MAX_SUBSCRIPTIONS)); let before = subs .iter() - .map(|loc| format!("{:.8}", loc.peer)) + .map(|loc| format!("{:.8}", loc.peer())) .collect::>(); info!( %contract, - subscriber = %subscriber.peer, + subscriber = %subscriber.peer(), subscribers_before = ?before, current_len = subs.len(), "seeding_manager: attempting to add subscriber" @@ -125,7 +125,7 @@ impl SeedingManager { if subs.len() >= Self::MAX_SUBSCRIBERS { warn!( %contract, - subscriber = %subscriber.peer, + subscriber = %subscriber.peer(), subscribers_before = ?before, "seeding_manager: max subscribers reached" ); @@ -136,7 +136,7 @@ impl SeedingManager { Ok(_) => { info!( %contract, - subscriber = %subscriber.peer, + subscriber = %subscriber.peer(), subscribers_before = ?before, "seeding_manager: subscriber already registered" ); @@ -146,7 +146,7 @@ impl SeedingManager { if subs_vec.len() == Self::MAX_SUBSCRIBERS { warn!( %contract, - subscriber = %subscriber.peer, + subscriber = %subscriber.peer(), subscribers_before = ?before, "seeding_manager: max subscribers reached during insert" ); @@ -155,7 +155,7 @@ impl SeedingManager { subs_vec.insert(next_idx, subscriber); let after = subs_vec .iter() - .map(|loc| format!("{:.8}", loc.peer)) + .map(|loc| format!("{:.8}", loc.peer())) .collect::>(); info!( %contract, @@ -181,7 +181,7 @@ impl SeedingManager { let removed = subs[pos].clone(); tracing::debug!( %contract_key, - removed_peer = %removed.peer, + removed_peer = %removed.peer(), removed_location = ?removed.location, "seeding_manager: pruning subscriber due to location match" ); @@ -194,7 +194,7 @@ impl SeedingManager { /// Remove a subscriber by peer ID from a specific contract pub fn remove_subscriber_by_peer(&self, contract: &ContractKey, peer: &crate::node::PeerId) { if let Some(mut subs) = self.subscribers.get_mut(contract) { - if let Some(pos) = subs.iter().position(|l| &l.peer == peer) { + if let Some(pos) = subs.iter().position(|l| &l.peer() == peer) { subs.swap_remove(pos); tracing::debug!( "Removed peer {} from subscriber list for contract {}", @@ -239,18 +239,21 @@ mod tests { let peer2 = test_peer_id(2); let peer3 = test_peer_id(3); - let peer_loc1 = PeerKeyLocation { - peer: peer1.clone(), - location: Some(Location::try_from(0.1).unwrap()), - }; - let peer_loc2 = PeerKeyLocation { - peer: peer2.clone(), - location: Some(Location::try_from(0.2).unwrap()), - }; - let peer_loc3 = PeerKeyLocation { - peer: peer3.clone(), - location: Some(Location::try_from(0.3).unwrap()), - }; + let peer_loc1 = PeerKeyLocation::with_location( + peer1.pub_key.clone(), + peer1.addr, + Location::try_from(0.1).unwrap(), + ); + let peer_loc2 = PeerKeyLocation::with_location( + peer2.pub_key.clone(), + peer2.addr, + Location::try_from(0.2).unwrap(), + ); + let peer_loc3 = PeerKeyLocation::with_location( + peer3.pub_key.clone(), + peer3.addr, + Location::try_from(0.3).unwrap(), + ); // Add subscribers assert!(seeding_manager @@ -276,9 +279,9 @@ mod tests { { let subs = seeding_manager.subscribers_of(&contract_key).unwrap(); assert_eq!(subs.len(), 2); - assert!(!subs.iter().any(|p| p.peer == peer2)); 
- assert!(subs.iter().any(|p| p.peer == peer1)); - assert!(subs.iter().any(|p| p.peer == peer3)); + assert!(!subs.iter().any(|p| p.peer() == peer2)); + assert!(subs.iter().any(|p| p.peer() == peer1)); + assert!(subs.iter().any(|p| p.peer() == peer3)); } // Remove peer1 @@ -288,8 +291,8 @@ mod tests { { let subs = seeding_manager.subscribers_of(&contract_key).unwrap(); assert_eq!(subs.len(), 1); - assert!(!subs.iter().any(|p| p.peer == peer1)); - assert!(subs.iter().any(|p| p.peer == peer3)); + assert!(!subs.iter().any(|p| p.peer() == peer1)); + assert!(subs.iter().any(|p| p.peer() == peer3)); } // Remove non-existent peer (should not error) diff --git a/crates/core/src/router/isotonic_estimator.rs b/crates/core/src/router/isotonic_estimator.rs index 267ed9d08..ec1a1fc1f 100644 --- a/crates/core/src/router/isotonic_estimator.rs +++ b/crates/core/src/router/isotonic_estimator.rs @@ -329,7 +329,7 @@ mod tests { ) -> IsotonicEvent { let distance: f64 = peer.location.unwrap().distance(contract_location).as_f64(); - let result = distance.powf(0.5) + peer.peer.clone().to_bytes()[0] as f64; + let result = distance.powf(0.5) + peer.peer().clone().to_bytes()[0] as f64; IsotonicEvent { peer, contract_location, @@ -343,7 +343,7 @@ mod tests { ) -> IsotonicEvent { let distance: f64 = peer.location.unwrap().distance(contract_location).as_f64(); - let result = (100.0 - distance).powf(0.5) + peer.peer.clone().to_bytes()[0] as f64; + let result = (100.0 - distance).powf(0.5) + peer.peer().clone().to_bytes()[0] as f64; IsotonicEvent { peer, contract_location, diff --git a/crates/core/src/topology/mod.rs b/crates/core/src/topology/mod.rs index 447302010..77135232c 100644 --- a/crates/core/src/topology/mod.rs +++ b/crates/core/src/topology/mod.rs @@ -456,7 +456,7 @@ impl TopologyManager { info!( current_connections, max_allowed = self.limits.max_connections, - %peer.peer, + peer = %peer.peer(), "Enforcing max-connections cap via fallback removal" ); adj = TopologyAdjustment::RemoveConnections(vec![peer]); diff --git a/crates/core/src/tracing/mod.rs b/crates/core/src/tracing/mod.rs index de12b5ee0..aa8c8e424 100644 --- a/crates/core/src/tracing/mod.rs +++ b/crates/core/src/tracing/mod.rs @@ -140,10 +140,11 @@ impl<'a> NetEventLog<'a> { peer_id, kind: EventKind::Connect(ConnectEvent::Connected { this: ring.connection_manager.own_location(), - connected: PeerKeyLocation { - peer, - location: Some(location), - }, + connected: PeerKeyLocation::with_location( + peer.pub_key.clone(), + peer.addr, + location, + ), }), } } @@ -192,7 +193,7 @@ impl<'a> NetEventLog<'a> { let events = vec![ NetEventLog { tx: msg.id(), - peer_id: acceptor.peer.clone(), + peer_id: acceptor.peer().clone(), kind: EventKind::Connect(ConnectEvent::Connected { this: acceptor.clone(), connected: target.clone(), @@ -200,7 +201,7 @@ impl<'a> NetEventLog<'a> { }, NetEventLog { tx: msg.id(), - peer_id: target.peer.clone(), + peer_id: target.peer().clone(), kind: EventKind::Connect(ConnectEvent::Connected { this: target.clone(), connected: acceptor, @@ -658,20 +659,20 @@ async fn send_to_metrics_server( let res = match &send_msg.kind { EventKind::Connect(ConnectEvent::Connected { this: - PeerKeyLocation { - peer: from_peer, + this_peer @ PeerKeyLocation { location: Some(from_loc), + .. }, connected: - PeerKeyLocation { - peer: to_peer, + connected_peer @ PeerKeyLocation { location: Some(to_loc), + .. 
}, }) => { let msg = PeerChange::added_connection_msg( (&send_msg.tx != Transaction::NULL).then(|| send_msg.tx.to_string()), - (from_peer.clone().to_string(), from_loc.as_f64()), - (to_peer.clone().to_string(), to_loc.as_f64()), + (this_peer.peer().to_string(), from_loc.as_f64()), + (connected_peer.peer().to_string(), to_loc.as_f64()), ); ws_stream.send(Message::Binary(msg.into())).await } @@ -694,7 +695,7 @@ async fn send_to_metrics_server( send_msg.tx.to_string(), key.to_string(), requester.to_string(), - target.peer.to_string(), + target.peer().to_string(), *timestamp, contract_location.as_f64(), ); @@ -714,7 +715,7 @@ async fn send_to_metrics_server( send_msg.tx.to_string(), key.to_string(), requester.to_string(), - target.peer.to_string(), + target.peer().to_string(), *timestamp, contract_location.as_f64(), ); @@ -794,7 +795,7 @@ async fn send_to_metrics_server( id.to_string(), key.to_string(), contract_location.as_f64(), - at.peer.to_string(), + at.peer().to_string(), at.location.unwrap().as_f64(), *timestamp, ); @@ -813,7 +814,7 @@ async fn send_to_metrics_server( id.to_string(), key.to_string(), requester.to_string(), - target.peer.to_string(), + target.peer().to_string(), *timestamp, contract_location.as_f64(), ); @@ -831,7 +832,7 @@ async fn send_to_metrics_server( id.to_string(), key.to_string(), requester.to_string(), - target.peer.to_string(), + target.peer().to_string(), *timestamp, contract_location.as_f64(), ); @@ -1547,13 +1548,16 @@ pub(super) mod test { if let EventKind::Connect(ConnectEvent::Connected { this, connected }) = &l.kind { let disconnected = disconnects - .get(&connected.peer) + .get(&connected.peer()) .iter() .flat_map(|dcs| dcs.iter()) .any(|dc| dc > &l.datetime); if let Some((this_loc, conn_loc)) = this.location.zip(connected.location) { if this.pub_key() == key && !disconnected { - return Some((connected.peer.clone(), conn_loc.distance(this_loc))); + return Some(( + connected.peer().clone(), + conn_loc.distance(this_loc), + )); } } } @@ -1646,14 +1650,16 @@ pub(super) mod test { tx: &tx, peer_id: peer_id.clone(), kind: EventKind::Connect(ConnectEvent::Connected { - this: PeerKeyLocation { - peer: peer_id.clone(), - location: Some(loc), - }, - connected: PeerKeyLocation { - peer: other.clone(), - location: Some(*location), - }, + this: PeerKeyLocation::with_location( + peer_id.pub_key.clone(), + peer_id.addr, + loc, + ), + connected: PeerKeyLocation::with_location( + other.pub_key.clone(), + other.addr, + *location, + ), }), })) }, From 316998d20ca4d18ffbad40a2d41bc08f9cc237f6 Mon Sep 17 00:00:00 2001 From: Ian Clarke Date: Thu, 27 Nov 2025 17:20:25 -0600 Subject: [PATCH 05/32] refactor(ring): remove dead_code allows from PeerAddr MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The PeerAddr enum and impl block are actively used, so the dead_code warnings are false positives. Remove the allows. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --- crates/core/src/ring/peer_key_location.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/crates/core/src/ring/peer_key_location.rs b/crates/core/src/ring/peer_key_location.rs index 115b4d9e8..e04781647 100644 --- a/crates/core/src/ring/peer_key_location.rs +++ b/crates/core/src/ring/peer_key_location.rs @@ -13,7 +13,6 @@ use std::{fmt::Display, hash::Hash}; /// The first recipient should fill this in from the packet source address. /// - `Known`: The address is known and explicitly specified. 
#[derive(Serialize, Deserialize, Clone, PartialEq, Eq, Hash, Debug)] -#[allow(dead_code)] // Will be used as refactoring progresses pub enum PeerAddr { /// Address unknown - will be filled in by first recipient from packet source. /// Used when a peer doesn't know its own external address (e.g., behind NAT). @@ -22,7 +21,6 @@ pub enum PeerAddr { Known(SocketAddr), } -#[allow(dead_code)] // Will be used as refactoring progresses impl PeerAddr { /// Returns the socket address if known, None otherwise. pub fn as_known(&self) -> Option<&SocketAddr> { From 36cad436dd1b0ada7f56705a781cd4ef3085cd64 Mon Sep 17 00:00:00 2001 From: Ian Clarke Date: Thu, 27 Nov 2025 17:37:59 -0600 Subject: [PATCH 06/32] refactor(ring): change LiveTransactionTracker key from PeerId to SocketAddr MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Transactions are tied to network connections, not cryptographic identities. Using SocketAddr as the key reflects this and aligns with the broader refactoring to separate identity from address. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --- crates/core/src/node/op_state_manager.rs | 2 +- crates/core/src/ring/live_tx.rs | 25 ++++++++++++++---------- crates/core/src/ring/mod.rs | 6 +++--- 3 files changed, 19 insertions(+), 14 deletions(-) diff --git a/crates/core/src/node/op_state_manager.rs b/crates/core/src/node/op_state_manager.rs index fc92682c4..b84ec3aca 100644 --- a/crates/core/src/node/op_state_manager.rs +++ b/crates/core/src/node/op_state_manager.rs @@ -646,7 +646,7 @@ impl OpManager { } self.ring .live_tx_tracker - .add_transaction(peer.clone(), *transaction); + .add_transaction(peer.addr, *transaction); } } diff --git a/crates/core/src/ring/live_tx.rs b/crates/core/src/ring/live_tx.rs index d9750683f..23bf5a43f 100644 --- a/crates/core/src/ring/live_tx.rs +++ b/crates/core/src/ring/live_tx.rs @@ -1,23 +1,28 @@ -use crate::{message::Transaction, node::PeerId}; +use crate::message::Transaction; use dashmap::DashMap; +use std::net::SocketAddr; use std::sync::Arc; +/// Tracks live transactions per peer address. +/// +/// Uses `SocketAddr` as the key since transactions are tied to network connections, +/// not cryptographic identities. 
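+///
+/// Minimal usage sketch, assuming a tracker and an in-flight transaction are
+/// already at hand (`tracker`, `tx`, and the address literal are placeholders,
+/// not part of this patch):
+///
+/// ```ignore
+/// let addr: SocketAddr = "203.0.113.7:50000".parse().unwrap();
+/// tracker.add_transaction(addr, tx);
+/// assert!(tracker.has_live_connection(addr));
+/// // Once the connection goes away, drop everything keyed to that address.
+/// tracker.prune_transactions_from_peer(addr);
+/// assert!(!tracker.has_live_connection(addr));
+/// ```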
#[derive(Clone)] pub struct LiveTransactionTracker { - tx_per_peer: Arc>>, + tx_per_peer: Arc>>, } impl LiveTransactionTracker { - pub fn add_transaction(&self, peer: PeerId, tx: Transaction) { - self.tx_per_peer.entry(peer).or_default().push(tx); + pub fn add_transaction(&self, peer_addr: SocketAddr, tx: Transaction) { + self.tx_per_peer.entry(peer_addr).or_default().push(tx); } pub fn remove_finished_transaction(&self, tx: Transaction) { - let keys_to_remove: Vec = self + let keys_to_remove: Vec = self .tx_per_peer .iter() .filter(|entry| entry.value().iter().any(|otx| otx == &tx)) - .map(|entry| entry.key().clone()) + .map(|entry| *entry.key()) .collect(); for k in keys_to_remove { @@ -34,12 +39,12 @@ impl LiveTransactionTracker { } } - pub(crate) fn prune_transactions_from_peer(&self, peer: &PeerId) { - self.tx_per_peer.remove(peer); + pub(crate) fn prune_transactions_from_peer(&self, peer_addr: SocketAddr) { + self.tx_per_peer.remove(&peer_addr); } - pub(crate) fn has_live_connection(&self, peer: &PeerId) -> bool { - self.tx_per_peer.contains_key(peer) + pub(crate) fn has_live_connection(&self, peer_addr: SocketAddr) -> bool { + self.tx_per_peer.contains_key(&peer_addr) } pub(crate) fn still_alive(&self, tx: &Transaction) -> bool { diff --git a/crates/core/src/ring/mod.rs b/crates/core/src/ring/mod.rs index e9e856ead..572716ebb 100644 --- a/crates/core/src/ring/mod.rs +++ b/crates/core/src/ring/mod.rs @@ -368,7 +368,7 @@ impl Ring { pub async fn prune_connection(&self, peer: PeerId) { tracing::debug!(%peer, "Removing connection"); - self.live_tx_tracker.prune_transactions_from_peer(&peer); + self.live_tx_tracker.prune_transactions_from_peer(peer.addr); // This case would be when a connection is being open, so peer location hasn't been recorded yet and we can ignore everything below let Some(loc) = self.connection_manager.prune_alive_connection(&peer) else { return; @@ -486,7 +486,7 @@ impl Ring { .map(|(loc, conns)| { let conns: Vec<_> = conns .iter() - .filter(|conn| !live_tx_tracker.has_live_connection(&conn.location.peer())) + .filter(|conn| !live_tx_tracker.has_live_connection(conn.location.addr())) .cloned() .collect(); (*loc, conns) @@ -666,7 +666,7 @@ impl Ring { op_manager.connect_forward_estimator.clone(), ); - live_tx_tracker.add_transaction(query_target.peer().clone(), tx); + live_tx_tracker.add_transaction(query_target.addr(), tx); op_manager .push(tx, OpEnum::Connect(Box::new(op))) .await From 3f1fabab47684ea54822a17d8f8279e6c62214e9 Mon Sep 17 00:00:00 2001 From: Ian Clarke Date: Thu, 27 Nov 2025 18:05:30 -0600 Subject: [PATCH 07/32] refactor(network): change NetworkBridge trait to use SocketAddr instead of PeerId MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Key changes: - NetworkBridge::send() and drop_connection() now take SocketAddr - NodeEvent::DropConnection now uses SocketAddr - P2pBridge looks up PeerId from accepted_peers when needed - ConnectionManager.get_peer_by_addr() added for reverse lookup - Fixed RelayState to use set_addr() instead of attempting mutation through temporary .peer() value - Fixed clippy warning in subscribe.rs (ok_or_else -> ok_or) All 244 unit tests pass. 
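Illustrative call-site sketch of the new bridge surface (the `bridge`, `target`
and `msg` bindings are placeholders rather than code from this patch; the real
call sites live in the operations modules):

    // Connections are now keyed by socket address, not by PeerId.
    bridge.send(target.addr(), msg).await?;
    bridge.drop_connection(target.addr()).await?;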
🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --- crates/core/src/message.rs | 4 +- crates/core/src/node/network_bridge.rs | 26 ++- .../core/src/node/network_bridge/in_memory.rs | 15 +- .../src/node/network_bridge/p2p_protoc.rs | 159 +++++++++++------- crates/core/src/node/testing_impl.rs | 24 ++- crates/core/src/operations/connect.rs | 6 +- crates/core/src/operations/mod.rs | 8 +- crates/core/src/operations/put.rs | 14 +- crates/core/src/operations/subscribe.rs | 2 +- crates/core/src/operations/update.rs | 8 +- crates/core/src/ring/connection_manager.rs | 26 ++- crates/core/src/ring/mod.rs | 2 +- 12 files changed, 188 insertions(+), 106 deletions(-) diff --git a/crates/core/src/message.rs b/crates/core/src/message.rs index 4481ad204..84146adc5 100644 --- a/crates/core/src/message.rs +++ b/crates/core/src/message.rs @@ -333,8 +333,8 @@ type ConnectResult = Result<(PeerId, RemainingChecks), ()>; /// Internal node events emitted to the event loop. #[derive(Debug, Clone)] pub(crate) enum NodeEvent { - /// Drop the given peer connection. - DropConnection(PeerId), + /// Drop the given peer connection by socket address. + DropConnection(std::net::SocketAddr), // Try connecting to the given peer. ConnectPeer { peer: PeerId, diff --git a/crates/core/src/node/network_bridge.rs b/crates/core/src/node/network_bridge.rs index df659f637..d47a955be 100644 --- a/crates/core/src/node/network_bridge.rs +++ b/crates/core/src/node/network_bridge.rs @@ -8,12 +8,12 @@ //! See [`../../architecture.md`](../../architecture.md) for its interactions with event loops and other components. use std::future::Future; +use std::net::SocketAddr; use either::Either; use serde::{Deserialize, Serialize}; use tokio::sync::mpsc::{self, Receiver, Sender}; -use super::PeerId; use crate::message::{NetMessage, NodeEvent}; mod handshake; @@ -25,19 +25,29 @@ pub(crate) type ConnResult = std::result::Result; /// Allows handling of connections to the network as well as sending messages /// to other peers in the network with whom connection has been established. +/// +/// Connections are keyed by socket address since that's what identifies +/// a network connection. The cryptographic identity is handled separately +/// at the transport layer. 
pub(crate) trait NetworkBridge: Send + Sync { - fn drop_connection(&mut self, peer: &PeerId) -> impl Future> + Send; - - fn send(&self, target: &PeerId, msg: NetMessage) - -> impl Future> + Send; + fn drop_connection( + &mut self, + peer_addr: SocketAddr, + ) -> impl Future> + Send; + + fn send( + &self, + target_addr: SocketAddr, + msg: NetMessage, + ) -> impl Future> + Send; } #[derive(Debug, thiserror::Error, Serialize, Deserialize)] pub(crate) enum ConnectionError { #[error("location unknown for this node")] LocationUnknown, - #[error("unable to send message")] - SendNotCompleted(PeerId), + #[error("unable to send message to {0}")] + SendNotCompleted(SocketAddr), #[error("Unexpected connection req")] UnexpectedReq, #[error("error while de/serializing message")] @@ -76,7 +86,7 @@ impl Clone for ConnectionError { match self { Self::LocationUnknown => Self::LocationUnknown, Self::Serialization(_) => Self::Serialization(None), - Self::SendNotCompleted(peer) => Self::SendNotCompleted(peer.clone()), + Self::SendNotCompleted(addr) => Self::SendNotCompleted(*addr), Self::IOError(err) => Self::IOError(err.clone()), Self::Timeout => Self::Timeout, Self::UnexpectedReq => Self::UnexpectedReq, diff --git a/crates/core/src/node/network_bridge/in_memory.rs b/crates/core/src/node/network_bridge/in_memory.rs index 9ecd5e84c..ff12ad845 100644 --- a/crates/core/src/node/network_bridge/in_memory.rs +++ b/crates/core/src/node/network_bridge/in_memory.rs @@ -2,6 +2,7 @@ use std::{ collections::HashMap, io::Cursor, + net::SocketAddr, sync::{Arc, LazyLock}, time::{Duration, Instant}, }; @@ -10,11 +11,11 @@ use crossbeam::channel::{self, Receiver, Sender}; use rand::{prelude::StdRng, seq::SliceRandom, Rng, SeedableRng}; use tokio::sync::Mutex; -use super::{ConnectionError, NetworkBridge, PeerId}; +use super::{ConnectionError, NetworkBridge}; use crate::{ config::GlobalExecutor, message::NetMessage, - node::{testing_impl::NetworkBridgeExt, NetEventRegister, OpManager}, + node::{testing_impl::NetworkBridgeExt, NetEventRegister, OpManager, PeerId}, tracing::NetEventLog, }; @@ -60,17 +61,19 @@ impl MemoryConnManager { } impl NetworkBridge for MemoryConnManager { - async fn send(&self, target: &PeerId, msg: NetMessage) -> super::ConnResult<()> { + async fn send(&self, target_addr: SocketAddr, msg: NetMessage) -> super::ConnResult<()> { self.log_register .register_events(NetEventLog::from_outbound_msg(&msg, &self.op_manager.ring)) .await; - self.op_manager.sending_transaction(target, &msg); + // Create a temporary PeerId for tracking (in-memory transport uses PeerId internally) + let target_peer = PeerId::new(target_addr, self.transport.interface_peer.pub_key.clone()); + self.op_manager.sending_transaction(&target_peer, &msg); let msg = bincode::serialize(&msg)?; - self.transport.send(target.clone(), msg); + self.transport.send(target_peer, msg); Ok(()) } - async fn drop_connection(&mut self, _peer: &PeerId) -> super::ConnResult<()> { + async fn drop_connection(&mut self, _peer_addr: SocketAddr) -> super::ConnResult<()> { Ok(()) } } diff --git a/crates/core/src/node/network_bridge/p2p_protoc.rs b/crates/core/src/node/network_bridge/p2p_protoc.rs index b1d3f2825..a2896cf87 100644 --- a/crates/core/src/node/network_bridge/p2p_protoc.rs +++ b/crates/core/src/node/network_bridge/p2p_protoc.rs @@ -76,30 +76,61 @@ impl P2pBridge { } impl NetworkBridge for P2pBridge { - async fn drop_connection(&mut self, peer: &PeerId) -> super::ConnResult<()> { - self.accepted_peers.remove(peer); - self.ev_listener_tx - 
.send(Right(NodeEvent::DropConnection(peer.clone()))) - .await - .map_err(|_| ConnectionError::SendNotCompleted(peer.clone()))?; - self.log_register - .register_events(Either::Left(NetEventLog::disconnected( - &self.op_manager.ring, - peer, - ))) - .await; + async fn drop_connection(&mut self, peer_addr: SocketAddr) -> super::ConnResult<()> { + // Find the peer by address and remove it + let peer = self + .accepted_peers + .iter() + .find(|p| p.addr == peer_addr) + .map(|p| p.clone()); + if let Some(peer) = peer { + self.accepted_peers.remove(&peer); + self.ev_listener_tx + .send(Right(NodeEvent::DropConnection(peer_addr))) + .await + .map_err(|_| ConnectionError::SendNotCompleted(peer_addr))?; + self.log_register + .register_events(Either::Left(NetEventLog::disconnected( + &self.op_manager.ring, + &peer, + ))) + .await; + } Ok(()) } - async fn send(&self, target: &PeerId, msg: NetMessage) -> super::ConnResult<()> { + async fn send(&self, target_addr: SocketAddr, msg: NetMessage) -> super::ConnResult<()> { self.log_register .register_events(NetEventLog::from_outbound_msg(&msg, &self.op_manager.ring)) .await; - self.op_manager.sending_transaction(target, &msg); - self.ev_listener_tx - .send(Left((target.clone(), Box::new(msg)))) - .await - .map_err(|_| ConnectionError::SendNotCompleted(target.clone()))?; + // Look up the full PeerId from accepted_peers for transaction tracking and sending + let target = self + .accepted_peers + .iter() + .find(|p| p.addr == target_addr) + .map(|p| p.clone()); + if let Some(ref target) = target { + self.op_manager.sending_transaction(target, &msg); + self.ev_listener_tx + .send(Left((target.clone(), Box::new(msg)))) + .await + .map_err(|_| ConnectionError::SendNotCompleted(target_addr))?; + } else { + // No known peer at this address - create a temporary PeerId for the event + // This should rarely happen in practice + tracing::warn!( + %target_addr, + "Sending to unknown peer address - creating temporary PeerId" + ); + let temp_peer = PeerId::new( + target_addr, + (*self.op_manager.ring.connection_manager.pub_key).clone(), + ); + self.ev_listener_tx + .send(Left((temp_peer, Box::new(msg)))) + .await + .map_err(|_| ConnectionError::SendNotCompleted(target_addr))?; + } Ok(()) } } @@ -623,48 +654,60 @@ impl P2pConnManager { } } ConnEvent::NodeAction(action) => match action { - NodeEvent::DropConnection(peer) => { - tracing::debug!(self_peer = %ctx.bridge.op_manager.ring.connection_manager.pub_key, %peer, conn_map_size = ctx.connections.len(), "[CONN_TRACK] REMOVE: DropConnection event - removing from connections HashMap"); - if let Err(error) = handshake_cmd_sender - .send(HandshakeCommand::DropConnection { peer: peer.clone() }) - .await - { - tracing::warn!( - %peer, - ?error, - "Failed to enqueue DropConnection command" - ); - } - // Immediately prune topology counters so we don't leak open connection slots. 
- ctx.bridge - .op_manager - .ring - .prune_connection(peer.clone()) - .await; - if let Some(conn) = ctx.connections.remove(&peer) { - // TODO: review: this could potentially leave garbage tasks in the background with peer listener - match timeout( - Duration::from_secs(1), - conn.send(Right(ConnEvent::NodeAction( - NodeEvent::DropConnection(peer), - ))), - ) - .await + NodeEvent::DropConnection(peer_addr) => { + // Find the PeerId by socket address + let peer = ctx + .connections + .keys() + .find(|p| p.addr == peer_addr) + .cloned(); + if let Some(peer) = peer { + tracing::debug!(self_peer = %ctx.bridge.op_manager.ring.connection_manager.pub_key, %peer, conn_map_size = ctx.connections.len(), "[CONN_TRACK] REMOVE: DropConnection event - removing from connections HashMap"); + if let Err(error) = handshake_cmd_sender + .send(HandshakeCommand::DropConnection { + peer: peer.clone(), + }) + .await { - Ok(Ok(())) => {} - Ok(Err(send_error)) => { - tracing::error!( - ?send_error, - "Failed to send drop connection message" - ); - } - Err(elapsed) => { - tracing::error!( - ?elapsed, - "Timeout while sending drop connection message" - ); + tracing::warn!( + %peer, + ?error, + "Failed to enqueue DropConnection command" + ); + } + // Immediately prune topology counters so we don't leak open connection slots. + ctx.bridge + .op_manager + .ring + .prune_connection(peer.clone()) + .await; + if let Some(conn) = ctx.connections.remove(&peer) { + // TODO: review: this could potentially leave garbage tasks in the background with peer listener + match timeout( + Duration::from_secs(1), + conn.send(Right(ConnEvent::NodeAction( + NodeEvent::DropConnection(peer_addr), + ))), + ) + .await + { + Ok(Ok(())) => {} + Ok(Err(send_error)) => { + tracing::error!( + ?send_error, + "Failed to send drop connection message" + ); + } + Err(elapsed) => { + tracing::error!( + ?elapsed, + "Timeout while sending drop connection message" + ); + } } } + } else { + tracing::debug!(%peer_addr, "DropConnection for unknown address - ignoring"); } } NodeEvent::ConnectPeer { @@ -1881,7 +1924,7 @@ impl P2pConnManager { async move { tracing::info!(%peer, "Transient connection expired; dropping"); if let Err(err) = drop_tx - .send(Right(NodeEvent::DropConnection(peer.clone()))) + .send(Right(NodeEvent::DropConnection(peer.addr))) .await { tracing::warn!( diff --git a/crates/core/src/node/testing_impl.rs b/crates/core/src/node/testing_impl.rs index 75a49cbb9..14a51ed54 100644 --- a/crates/core/src/node/testing_impl.rs +++ b/crates/core/src/node/testing_impl.rs @@ -899,15 +899,21 @@ where let msg = match msg { Ok(Either::Left(msg)) => msg, Ok(Either::Right(action)) => match action { - NodeEvent::DropConnection(peer) => { - tracing::info!("Dropping connection to {peer}"); - event_register - .register_events(Either::Left(crate::tracing::NetEventLog::disconnected( - &op_manager.ring, - &peer, - ))) - .await; - op_manager.ring.prune_connection(peer).await; + NodeEvent::DropConnection(peer_addr) => { + tracing::info!("Dropping connection to {peer_addr}"); + // Look up the peer by address in the ring + if let Some(peer) = op_manager + .ring + .connection_manager + .get_peer_by_addr(peer_addr) + { + event_register + .register_events(Either::Left( + crate::tracing::NetEventLog::disconnected(&op_manager.ring, &peer), + )) + .await; + op_manager.ring.prune_connection(peer).await; + } continue; } NodeEvent::ConnectPeer { peer, .. 
} => { diff --git a/crates/core/src/operations/connect.rs b/crates/core/src/operations/connect.rs index 2a447dfd3..59425d13b 100644 --- a/crates/core/src/operations/connect.rs +++ b/crates/core/src/operations/connect.rs @@ -807,7 +807,7 @@ impl Operation for ConnectOp { address, }; network_bridge - .send(&target.peer(), NetMessage::V1(NetMessageV1::Connect(msg))) + .send(target.addr(), NetMessage::V1(NetMessageV1::Connect(msg))) .await?; } @@ -830,7 +830,7 @@ impl Operation for ConnectOp { }; network_bridge .send( - &next.peer(), + next.addr(), NetMessage::V1(NetMessageV1::Connect(forward_msg)), ) .await?; @@ -939,7 +939,7 @@ impl Operation for ConnectOp { }; network_bridge .send( - &upstream.peer(), + upstream.addr(), NetMessage::V1(NetMessageV1::Connect(forward_msg)), ) .await?; diff --git a/crates/core/src/operations/mod.rs b/crates/core/src/operations/mod.rs index 1686bae70..18626b344 100644 --- a/crates/core/src/operations/mod.rs +++ b/crates/core/src/operations/mod.rs @@ -97,7 +97,7 @@ where Err(err) => { if let Some(sender) = sender { network_bridge - .send(&sender, NetMessage::V1(NetMessageV1::Aborted(tx_id))) + .send(sender.addr, NetMessage::V1(NetMessageV1::Aborted(tx_id))) .await?; } return Err(err); @@ -145,7 +145,7 @@ where op_manager.completed(id); if let Some(target) = msg.target() { tracing::debug!(%id, %target, "sending final message to target"); - network_bridge.send(&target.peer(), msg).await?; + network_bridge.send(target.addr(), msg).await?; } return Ok(Some(updated_state)); } else { @@ -153,7 +153,7 @@ where tracing::debug!(%id, "operation in progress"); if let Some(target) = msg.target() { tracing::debug!(%id, %target, "sending updated op state"); - network_bridge.send(&target.peer(), msg).await?; + network_bridge.send(target.addr(), msg).await?; op_manager.push(id, updated_state).await?; } else { tracing::debug!(%id, "queueing op state for local processing"); @@ -187,7 +187,7 @@ where if let Some(target) = msg.target() { tracing::debug!(%tx_id, target=%target.peer(), "sending back message to target"); - network_bridge.send(&target.peer(), msg).await?; + network_bridge.send(target.addr(), msg).await?; } } Ok(OperationResult { diff --git a/crates/core/src/operations/put.rs b/crates/core/src/operations/put.rs index aadefc3f5..0ef178e8a 100644 --- a/crates/core/src/operations/put.rs +++ b/crates/core/src/operations/put.rs @@ -564,16 +564,14 @@ impl Operation for PutOp { ); conn_manager - .send(&upstream.peer(), NetMessage::from(ack)) + .send(upstream.addr(), NetMessage::from(ack)) .await?; new_state = None; } // Broadcast to all peers in parallel - // Collect peer_ids first to ensure they outlive the futures - let peer_ids: Vec<_> = broadcast_to.iter().map(|p| p.peer()).collect(); let mut broadcasting = Vec::with_capacity(broadcast_to.len()); - for (peer, peer_id) in broadcast_to.iter().zip(peer_ids.iter()) { + for peer in broadcast_to.iter() { let msg = PutMsg::BroadcastTo { id: *id, key: *key, @@ -583,7 +581,7 @@ impl Operation for PutOp { contract: contract.clone(), target: peer.clone(), }; - let f = conn_manager.send(peer_id, msg.into()); + let f = conn_manager.send(peer.addr(), msg.into()); broadcasting.push(f); } @@ -611,7 +609,7 @@ impl Operation for PutOp { err ); // todo: review this, maybe we should just dropping this subscription - conn_manager.drop_connection(&peer.peer()).await?; + conn_manager.drop_connection(peer.addr()).await?; incorrect_results += 1; } @@ -859,7 +857,7 @@ impl Operation for PutOp { for subscriber in old_subscribers { conn_manager .send( 
- &subscriber.peer(), + subscriber.addr(), NetMessage::V1(NetMessageV1::Unsubscribed { transaction: Transaction::new::(), key: dropped_key, @@ -1489,7 +1487,7 @@ where let _ = conn_manager .send( - &peer.peer(), + peer.addr(), (PutMsg::PutForward { id, sender: own_pkloc, diff --git a/crates/core/src/operations/subscribe.rs b/crates/core/src/operations/subscribe.rs index 547282991..e5ee9f852 100644 --- a/crates/core/src/operations/subscribe.rs +++ b/crates/core/src/operations/subscribe.rs @@ -492,7 +492,7 @@ impl Operation for SubscribeOp { .k_closest_potentially_caching(key, &skip, 3) .into_iter() .find(|candidate| candidate.peer() != own_loc.peer()) - .ok_or_else(|| RingError::NoCachingPeers(*key)) + .ok_or(RingError::NoCachingPeers(*key)) .map_err(OpError::from)?; skip.insert(forward_target.peer().clone()); diff --git a/crates/core/src/operations/update.rs b/crates/core/src/operations/update.rs index 25ad178b1..2b2b10a63 100644 --- a/crates/core/src/operations/update.rs +++ b/crates/core/src/operations/update.rs @@ -574,11 +574,9 @@ impl Operation for UpdateOp { let sender = op_manager.ring.connection_manager.own_location(); let mut broadcasted_to = *broadcasted_to; - // Collect peer_ids first to ensure they outlive the futures - let peer_ids: Vec<_> = broadcast_to.iter().map(|p| p.peer()).collect(); let mut broadcasting = Vec::with_capacity(broadcast_to.len()); - for (peer, peer_id) in broadcast_to.iter().zip(peer_ids.iter()) { + for peer in broadcast_to.iter() { let msg = UpdateMsg::BroadcastTo { id: *id, key: *key, @@ -586,7 +584,7 @@ impl Operation for UpdateOp { sender: sender.clone(), target: peer.clone(), }; - let f = conn_manager.send(peer_id, msg.into()); + let f = conn_manager.send(peer.addr(), msg.into()); broadcasting.push(f); } let error_futures = futures::future::join_all(broadcasting) @@ -611,7 +609,7 @@ impl Operation for UpdateOp { err ); // TODO: review this, maybe we should just dropping this subscription - conn_manager.drop_connection(&peer.peer()).await?; + conn_manager.drop_connection(peer.addr()).await?; incorrect_results += 1; } diff --git a/crates/core/src/ring/connection_manager.rs b/crates/core/src/ring/connection_manager.rs index 711f14e84..8713ec34d 100644 --- a/crates/core/src/ring/connection_manager.rs +++ b/crates/core/src/ring/connection_manager.rs @@ -300,7 +300,31 @@ impl ConnectionManager { self.peer_key.lock().clone() } - #[allow(dead_code)] + /// Look up a PeerId by socket address from connections_by_location or transient connections. 
+ pub fn get_peer_by_addr(&self, addr: SocketAddr) -> Option { + // Check connections by location + let connections = self.connections_by_location.read(); + for conns in connections.values() { + for conn in conns { + if conn.location.addr() == addr { + return Some(conn.location.peer()); + } + } + } + drop(connections); + + // Check transient connections + if let Some((peer, _)) = self + .transient_connections + .iter() + .find(|e| e.key().addr == addr) + .map(|e| (e.key().clone(), e.value().clone())) + { + return Some(peer); + } + None + } + pub fn is_gateway(&self) -> bool { self.is_gateway } diff --git a/crates/core/src/ring/mod.rs b/crates/core/src/ring/mod.rs index 572716ebb..411a4d4f5 100644 --- a/crates/core/src/ring/mod.rs +++ b/crates/core/src/ring/mod.rs @@ -575,7 +575,7 @@ impl Ring { notifier .notifications_sender .send(Either::Right(crate::message::NodeEvent::DropConnection( - peer.peer(), + peer.addr(), ))) .await .map_err(|error| { From ff5db9ed1275b55300e965713bee1e2ab6bcf4fa Mon Sep 17 00:00:00 2001 From: Ian Clarke Date: Thu, 27 Nov 2025 19:30:41 -0600 Subject: [PATCH 08/32] fix(network): rewrite sender addresses for NAT traversal in all operations MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Peers behind NAT don't know their external address and send messages with their local address (e.g., 127.0.0.1 or 192.168.x.x). When the gateway or relay receives these messages, it needs to update the sender field with the observed transport address so responses can be routed correctly. This commit adds `rewrite_sender_addr` methods to all operation message types (Put, Get, Subscribe, Update) and calls them in p2p_protoc.rs for all inbound messages. The Connect operation already handled this via its observed_addr mechanism. Changes: - message.rs: Add rewrite_sender_addr to NetMessage and NetMessageV1 - operations/put.rs: Add rewrite_sender_addr to PutMsg - operations/get.rs: Add rewrite_sender_addr to GetMsg - operations/subscribe.rs: Add rewrite_sender_addr to SubscribeMsg - operations/update.rs: Add rewrite_sender_addr to UpdateMsg - p2p_protoc.rs: Call msg.rewrite_sender_addr(remote_addr) for inbound msgs 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --- crates/core/src/message.rs | 28 ++++++++++++++++ .../src/node/network_bridge/p2p_protoc.rs | 8 +++++ crates/core/src/operations/get.rs | 17 ++++++++++ crates/core/src/operations/put.rs | 33 +++++++++++++++++++ crates/core/src/operations/subscribe.rs | 20 +++++++++++ crates/core/src/operations/update.rs | 26 +++++++++++++++ 6 files changed, 132 insertions(+) diff --git a/crates/core/src/message.rs b/crates/core/src/message.rs index 84146adc5..f37e364eb 100644 --- a/crates/core/src/message.rs +++ b/crates/core/src/message.rs @@ -5,6 +5,7 @@ use std::{ borrow::{Borrow, Cow}, fmt::Display, + net::SocketAddr, time::{Duration, SystemTime}, }; @@ -272,6 +273,17 @@ pub(crate) enum NetMessage { V1(NetMessageV1), } +impl NetMessage { + /// Updates the sender's address in the message with the observed transport address. + /// This is essential for NAT traversal - peers behind NAT don't know their external + /// address, so we update it based on what the transport layer observed. 
+ pub(crate) fn rewrite_sender_addr(&mut self, observed_addr: SocketAddr) { + match self { + NetMessage::V1(msg) => msg.rewrite_sender_addr(observed_addr), + } + } +} + #[derive(Debug, Serialize, Deserialize, Clone)] pub(crate) enum NetMessageV1 { Connect(ConnectMsg), @@ -287,6 +299,22 @@ pub(crate) enum NetMessageV1 { Aborted(Transaction), } +impl NetMessageV1 { + /// Updates the sender's address in the message with the observed transport address. + fn rewrite_sender_addr(&mut self, observed_addr: SocketAddr) { + match self { + NetMessageV1::Put(msg) => msg.rewrite_sender_addr(observed_addr), + NetMessageV1::Get(msg) => msg.rewrite_sender_addr(observed_addr), + NetMessageV1::Subscribe(msg) => msg.rewrite_sender_addr(observed_addr), + NetMessageV1::Update(msg) => msg.rewrite_sender_addr(observed_addr), + // Connect messages are handled separately (they use observed_addr field) + NetMessageV1::Connect(_) => {} + // These don't have sender addresses to rewrite + NetMessageV1::Unsubscribed { .. } | NetMessageV1::Aborted(_) => {} + } + } +} + trait Versioned { fn version(&self) -> semver::Version; } diff --git a/crates/core/src/node/network_bridge/p2p_protoc.rs b/crates/core/src/node/network_bridge/p2p_protoc.rs index a2896cf87..7afb5ac4e 100644 --- a/crates/core/src/node/network_bridge/p2p_protoc.rs +++ b/crates/core/src/node/network_bridge/p2p_protoc.rs @@ -365,6 +365,14 @@ impl P2pConnManager { payload.observed_addr = Some(remote_addr); } } + // Rewrite sender addresses in all inbound messages with the observed + // transport address. This is essential for NAT traversal - peers behind + // NAT don't know their external address, so we update it based on what + // the transport layer observed. Without this, responses would be sent + // to the wrong address (e.g., 127.0.0.1 instead of the real NAT address). + if let Some(remote_addr) = remote { + msg.rewrite_sender_addr(remote_addr); + } ctx.handle_inbound_message(msg, &op_manager, &mut state) .await?; } diff --git a/crates/core/src/operations/get.rs b/crates/core/src/operations/get.rs index 430a1d342..3859024e6 100644 --- a/crates/core/src/operations/get.rs +++ b/crates/core/src/operations/get.rs @@ -1528,6 +1528,23 @@ mod messages { Self::ReturnGet { sender, .. } => Some(sender), } } + + /// Updates sender addresses with the observed transport address. + /// This is essential for NAT traversal - peers behind NAT don't know their external + /// address, so we rewrite it based on what the transport layer observed. + pub(crate) fn rewrite_sender_addr(&mut self, observed_addr: std::net::SocketAddr) { + match self { + Self::RequestGet { sender, .. } => { + sender.set_addr(observed_addr); + } + Self::SeekNode { sender, .. } => { + sender.set_addr(observed_addr); + } + Self::ReturnGet { sender, .. } => { + sender.set_addr(observed_addr); + } + } + } } impl Display for GetMsg { diff --git a/crates/core/src/operations/put.rs b/crates/core/src/operations/put.rs index 0ef178e8a..20e92f802 100644 --- a/crates/core/src/operations/put.rs +++ b/crates/core/src/operations/put.rs @@ -1638,6 +1638,39 @@ mod messages { _ => None, } } + + /// Updates sender/origin addresses with the observed transport address. + /// This is essential for NAT traversal - peers behind NAT don't know their external + /// address, so we rewrite it based on what the transport layer observed. + pub(crate) fn rewrite_sender_addr(&mut self, observed_addr: std::net::SocketAddr) { + match self { + Self::RequestPut { sender, origin, .. 
} => { + sender.set_addr(observed_addr); + origin.set_addr(observed_addr); + } + Self::SeekNode { sender, origin, .. } => { + sender.set_addr(observed_addr); + origin.set_addr(observed_addr); + } + Self::PutForward { sender, origin, .. } => { + sender.set_addr(observed_addr); + origin.set_addr(observed_addr); + } + Self::SuccessfulPut { sender, origin, .. } => { + sender.set_addr(observed_addr); + origin.set_addr(observed_addr); + } + Self::Broadcasting { sender, origin, .. } => { + sender.set_addr(observed_addr); + origin.set_addr(observed_addr); + } + Self::BroadcastTo { sender, origin, .. } => { + sender.set_addr(observed_addr); + origin.set_addr(observed_addr); + } + Self::AwaitPut { .. } => {} + } + } } impl Display for PutMsg { diff --git a/crates/core/src/operations/subscribe.rs b/crates/core/src/operations/subscribe.rs index e5ee9f852..4a702acbc 100644 --- a/crates/core/src/operations/subscribe.rs +++ b/crates/core/src/operations/subscribe.rs @@ -976,6 +976,26 @@ mod messages { _ => None, } } + + /// Updates subscriber/sender addresses with the observed transport address. + /// This is essential for NAT traversal - peers behind NAT don't know their external + /// address, so we rewrite it based on what the transport layer observed. + pub(crate) fn rewrite_sender_addr(&mut self, observed_addr: std::net::SocketAddr) { + match self { + Self::FetchRouting { .. } => { + // No sender field to rewrite + } + Self::RequestSub { subscriber, .. } => { + subscriber.set_addr(observed_addr); + } + Self::SeekNode { subscriber, .. } => { + subscriber.set_addr(observed_addr); + } + Self::ReturnSub { sender, .. } => { + sender.set_addr(observed_addr); + } + } + } } impl Display for SubscribeMsg { diff --git a/crates/core/src/operations/update.rs b/crates/core/src/operations/update.rs index 2b2b10a63..8d4ca50e0 100644 --- a/crates/core/src/operations/update.rs +++ b/crates/core/src/operations/update.rs @@ -1312,6 +1312,32 @@ mod messages { _ => None, } } + + /// Updates sender addresses with the observed transport address. + /// This is essential for NAT traversal - peers behind NAT don't know their external + /// address, so we rewrite it based on what the transport layer observed. + pub(crate) fn rewrite_sender_addr(&mut self, observed_addr: std::net::SocketAddr) { + match self { + Self::RequestUpdate { sender, .. } => { + sender.set_addr(observed_addr); + } + Self::AwaitUpdate { .. } => { + // No sender field to rewrite + } + Self::SeekNode { sender, .. } => { + sender.set_addr(observed_addr); + } + Self::Broadcasting { + sender, upstream, .. + } => { + sender.set_addr(observed_addr); + upstream.set_addr(observed_addr); + } + Self::BroadcastTo { sender, .. } => { + sender.set_addr(observed_addr); + } + } + } } impl Display for UpdateMsg { From f99bec2038121361aad9203ab821caee07401b63 Mon Sep 17 00:00:00 2001 From: Ian Clarke Date: Thu, 27 Nov 2025 21:13:34 -0600 Subject: [PATCH 09/32] refactor(network): change connections HashMap key from PeerId to SocketAddr MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This completes Phase 2 of issue #2164 by changing the P2pConnManager's connections HashMap to use SocketAddr as the key instead of PeerId. 
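To make the bookkeeping concrete, here is a minimal standalone sketch (editor's illustration, not code from this patch) of the two-map scheme summarized in the Changes list below: connections keyed by SocketAddr, plus a pub_key -> addr reverse index that must stay in sync with it. The channel sender and TransportPublicKey are replaced with String placeholders so the sketch compiles on its own.

    use std::collections::HashMap;
    use std::net::SocketAddr;

    struct ConnectionEntry {
        sender: String,          // stands in for the real mpsc Sender
        pub_key: Option<String>, // stands in for TransportPublicKey
    }

    #[derive(Default)]
    struct ConnTable {
        connections: HashMap<SocketAddr, ConnectionEntry>,
        addr_by_pub_key: HashMap<String, SocketAddr>,
    }

    impl ConnTable {
        fn insert(&mut self, addr: SocketAddr, pub_key: Option<String>, sender: String) {
            if let Some(key) = &pub_key {
                self.addr_by_pub_key.insert(key.clone(), addr);
            }
            self.connections.insert(addr, ConnectionEntry { sender, pub_key });
        }

        fn remove(&mut self, addr: SocketAddr) -> Option<ConnectionEntry> {
            let entry = self.connections.remove(&addr)?;
            if let Some(key) = &entry.pub_key {
                self.addr_by_pub_key.remove(key);
            }
            Some(entry)
        }

        // Identity-based lookup, used when only the peer's key is known
        // (e.g. an outbound target with an unspecified address).
        fn by_pub_key(&self, key: &str) -> Option<(SocketAddr, &ConnectionEntry)> {
            let addr = *self.addr_by_pub_key.get(key)?;
            self.connections.get(&addr).map(|entry| (addr, entry))
        }
    }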
Changes: - Introduce ConnectionEntry struct storing sender channel and optional pub_key - Add addr_by_pub_key reverse lookup for identity-based lookups - Update all connection map operations to use SocketAddr keys - Reconstruct PeerId when needed for ring operations and queries - Maintain pub_key learning from first inbound message This allows connection management to be based on socket addresses (the transport-level identifier) while still supporting identity-based lookups when needed. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --- .../src/node/network_bridge/p2p_protoc.rs | 194 +++++++++++++----- 1 file changed, 137 insertions(+), 57 deletions(-) diff --git a/crates/core/src/node/network_bridge/p2p_protoc.rs b/crates/core/src/node/network_bridge/p2p_protoc.rs index 7afb5ac4e..ab96907e1 100644 --- a/crates/core/src/node/network_bridge/p2p_protoc.rs +++ b/crates/core/src/node/network_bridge/p2p_protoc.rs @@ -138,12 +138,28 @@ impl NetworkBridge for P2pBridge { type PeerConnChannelSender = Sender>; type PeerConnChannelRecv = Receiver>; +/// Entry in the connections HashMap, keyed by SocketAddr. +/// The pub_key is learned from the first message received on this connection. +#[derive(Debug)] +struct ConnectionEntry { + sender: PeerConnChannelSender, + /// The peer's public key, learned from the first message. + /// None for transient connections before identity is established. + pub_key: Option, +} + pub(in crate::node) struct P2pConnManager { pub(in crate::node) gateways: Vec, pub(in crate::node) bridge: P2pBridge, conn_bridge_rx: Receiver, event_listener: Box, - connections: HashMap, + /// Connections indexed by socket address (the transport-level identifier). + /// This is the source of truth for active connections. + connections: HashMap, + /// Reverse lookup: public key -> socket address. + /// Used to find connections when we only know the peer's identity. + /// Must be kept in sync with `connections`. + addr_by_pub_key: HashMap, conn_event_tx: Option>, key_pair: TransportKeypair, listening_ip: IpAddr, @@ -214,6 +230,7 @@ impl P2pConnManager { conn_bridge_rx: rx_bridge_cmd, event_listener: Box::new(event_listener), connections: HashMap::new(), + addr_by_pub_key: HashMap::new(), conn_event_tx: None, key_pair, listening_ip: listener_ip, @@ -244,6 +261,7 @@ impl P2pConnManager { conn_bridge_rx, event_listener, connections, + addr_by_pub_key, conn_event_tx: _, key_pair, listening_ip, @@ -318,6 +336,7 @@ impl P2pConnManager { conn_bridge_rx: tokio::sync::mpsc::channel(1).1, // Dummy, won't be used event_listener, connections, + addr_by_pub_key, conn_event_tx: Some(conn_event_tx.clone()), key_pair, listening_ip, @@ -421,18 +440,18 @@ impl P2pConnManager { // removed by another task between those two calls. 
let peer_connection = ctx .connections - .get(&target_peer.peer()) + .get(&target_peer.addr()) .or_else(|| { if target_peer.addr().ip().is_unspecified() { ctx.connection_entry_by_pub_key(target_peer.pub_key()) - .map(|(existing_peer, sender)| { + .map(|(resolved_addr, entry)| { tracing::info!( tx = %msg.id(), target_peer = %target_peer.peer(), - resolved_addr = %existing_peer.addr, + resolved_addr = %resolved_addr, "Resolved outbound connection using peer public key due to unspecified address" ); - sender + entry }) } else { None @@ -448,7 +467,7 @@ impl P2pConnManager { ); match peer_connection { Some(peer_connection) => { - if let Err(e) = peer_connection.send(Left(msg.clone())).await { + if let Err(e) = peer_connection.sender.send(Left(msg.clone())).await { tracing::error!( tx = %msg.id(), "Failed to send message to peer: {}", e @@ -609,11 +628,19 @@ impl P2pConnManager { // Clean up all active connections let peers_to_cleanup: Vec<_> = - ctx.connections.keys().cloned().collect(); - for peer in peers_to_cleanup { - tracing::debug!(%peer, "Cleaning up active connection due to critical channel closure"); - - // Clean up ring state + ctx.connections.iter().map(|(addr, entry)| { + (*addr, entry.pub_key.clone()) + }).collect(); + for (peer_addr, pub_key_opt) in peers_to_cleanup { + tracing::debug!(%peer_addr, "Cleaning up active connection due to critical channel closure"); + + // Clean up ring state - construct PeerId with pub_key if available + let peer = if let Some(pub_key) = pub_key_opt.clone() { + PeerId::new(peer_addr, pub_key) + } else { + // Use our own pub_key as placeholder if we don't know the peer's + PeerId::new(peer_addr, (*ctx.bridge.op_manager.ring.connection_manager.pub_key).clone()) + }; ctx.bridge .op_manager .ring @@ -621,8 +648,11 @@ impl P2pConnManager { .await; // Remove from connection map - tracing::debug!(self_peer = %ctx.bridge.op_manager.ring.connection_manager.pub_key, %peer, conn_map_size = ctx.connections.len(), "[CONN_TRACK] REMOVE: ClosedChannel cleanup - removing from connections HashMap"); - ctx.connections.remove(&peer); + tracing::debug!(self_peer = %ctx.bridge.op_manager.ring.connection_manager.pub_key, %peer_addr, conn_map_size = ctx.connections.len(), "[CONN_TRACK] REMOVE: ClosedChannel cleanup - removing from connections HashMap"); + ctx.connections.remove(&peer_addr); + if let Some(pub_key) = pub_key_opt { + ctx.addr_by_pub_key.remove(&pub_key); + } // Notify handshake handler to clean up if let Err(error) = handshake_cmd_sender @@ -663,13 +693,16 @@ impl P2pConnManager { } ConnEvent::NodeAction(action) => match action { NodeEvent::DropConnection(peer_addr) => { - // Find the PeerId by socket address - let peer = ctx - .connections - .keys() - .find(|p| p.addr == peer_addr) - .cloned(); - if let Some(peer) = peer { + // Look up the connection entry by address + if let Some(entry) = ctx.connections.get(&peer_addr) { + // Construct PeerId from stored pub_key or fallback + let peer = if let Some(ref pub_key) = entry.pub_key { + PeerId::new(peer_addr, pub_key.clone()) + } else { + PeerId::new(peer_addr, (*ctx.bridge.op_manager.ring.connection_manager.pub_key).clone()) + }; + let pub_key_to_remove = entry.pub_key.clone(); + tracing::debug!(self_peer = %ctx.bridge.op_manager.ring.connection_manager.pub_key, %peer, conn_map_size = ctx.connections.len(), "[CONN_TRACK] REMOVE: DropConnection event - removing from connections HashMap"); if let Err(error) = handshake_cmd_sender .send(HandshakeCommand::DropConnection { @@ -689,11 +722,15 @@ impl P2pConnManager 
{ .ring .prune_connection(peer.clone()) .await; - if let Some(conn) = ctx.connections.remove(&peer) { + if let Some(conn) = ctx.connections.remove(&peer_addr) { + // Also remove from reverse lookup + if let Some(pub_key) = pub_key_to_remove { + ctx.addr_by_pub_key.remove(&pub_key); + } // TODO: review: this could potentially leave garbage tasks in the background with peer listener match timeout( Duration::from_secs(1), - conn.send(Right(ConnEvent::NodeAction( + conn.sender.send(Right(ConnEvent::NodeAction( NodeEvent::DropConnection(peer_addr), ))), ) @@ -760,7 +797,17 @@ impl P2pConnManager { } } NodeEvent::QueryConnections { callback } => { - let connections = ctx.connections.keys().cloned().collect(); + // Reconstruct PeerIds from stored connections + let connections: Vec = ctx.connections.iter() + .map(|(addr, entry)| { + if let Some(ref pub_key) = entry.pub_key { + PeerId::new(*addr, pub_key.clone()) + } else { + // Use our own pub_key as placeholder if we don't know the peer's + PeerId::new(*addr, (*ctx.bridge.op_manager.ring.connection_manager.pub_key).clone()) + } + }) + .collect(); match timeout( Duration::from_secs(1), callback.send(QueryResult::Connections(connections)), @@ -816,7 +863,16 @@ impl P2pConnManager { } } - let connections = ctx.connections.keys().cloned().collect(); + // Reconstruct PeerIds from stored connections + let connections: Vec = ctx.connections.iter() + .map(|(addr, entry)| { + if let Some(ref pub_key) = entry.pub_key { + PeerId::new(*addr, pub_key.clone()) + } else { + PeerId::new(*addr, (*ctx.bridge.op_manager.ring.connection_manager.pub_key).clone()) + } + }) + .collect(); let debug_info = crate::message::NetworkDebugInfo { application_subscriptions: app_subscriptions, network_subscriptions: network_subs, @@ -1280,13 +1336,15 @@ impl P2pConnManager { ); } + /// Looks up a connection by public key using the reverse lookup map. + /// Returns the socket address and connection entry if found. fn connection_entry_by_pub_key( &self, pub_key: &TransportPublicKey, - ) -> Option<(&PeerId, &PeerConnChannelSender)> { - self.connections - .iter() - .find(|(peer_id, _)| peer_id.pub_key == *pub_key) + ) -> Option<(SocketAddr, &ConnectionEntry)> { + self.addr_by_pub_key + .get(pub_key) + .and_then(|addr| self.connections.get(addr).map(|entry| (*addr, entry))) } async fn handle_connect_peer( @@ -1302,9 +1360,9 @@ impl P2pConnManager { let mut peer_addr = peer.addr; if peer_addr.ip().is_unspecified() { - if let Some((existing_peer, _)) = self.connection_entry_by_pub_key(&peer.pub_key) { - peer_addr = existing_peer.addr; - peer.addr = existing_peer.addr; + if let Some((existing_addr, _)) = self.connection_entry_by_pub_key(&peer.pub_key) { + peer_addr = existing_addr; + peer.addr = existing_addr; tracing::info!( tx = %tx, remote = %peer, @@ -1357,7 +1415,7 @@ impl P2pConnManager { } // If a transient transport already exists, promote it without dialing anew. 
- if self.connections.contains_key(&peer) { + if self.connections.contains_key(&peer.addr) { tracing::info!( tx = %tx, remote = %peer, @@ -1816,7 +1874,7 @@ impl P2pConnManager { // Only insert if connection doesn't already exist to avoid dropping existing channel let mut newly_inserted = false; - if !self.connections.contains_key(&peer_id) { + if !self.connections.contains_key(&peer_id.addr) { if is_transient { let current = transient_manager.count(); if current >= transient_manager.budget() { @@ -1831,7 +1889,12 @@ impl P2pConnManager { } let (tx, rx) = mpsc::channel(10); tracing::debug!(self_peer = %self.bridge.op_manager.ring.connection_manager.pub_key, %peer_id, conn_map_size = self.connections.len(), "[CONN_TRACK] INSERT: OutboundConnectionSuccessful - adding to connections HashMap"); - self.connections.insert(peer_id.clone(), tx); + self.connections.insert(peer_id.addr, ConnectionEntry { + sender: tx, + pub_key: Some(peer_id.pub_key.clone()), + }); + // Add to reverse lookup + self.addr_by_pub_key.insert(peer_id.pub_key.clone(), peer_id.addr); let Some(conn_events) = self.conn_event_tx.as_ref().cloned() else { anyhow::bail!("Connection event channel not initialized"); }; @@ -1979,26 +2042,37 @@ impl P2pConnManager { } } } - if let Some(existing_key) = self - .connections - .keys() - .find(|peer| { - peer.addr == remote_addr && peer.pub_key != new_peer_id.pub_key - }) - .cloned() - { - if let Some(channel) = self.connections.remove(&existing_key) { + // Check if we have a connection but with a different pub_key + if let Some(entry) = self.connections.get(&remote_addr) { + // If we don't have the pub_key stored yet or it differs from the new one, update it + let should_update = match &entry.pub_key { + None => true, + Some(old_pub_key) => old_pub_key != &new_peer_id.pub_key, + }; + if should_update { + let old_pub_key = entry.pub_key.clone(); tracing::info!( remote = %remote_addr, - old_peer = %existing_key, - new_peer = %new_peer_id, - "Updating provisional peer identity after inbound message" - ); - self.bridge.op_manager.ring.update_connection_identity( - &existing_key, - new_peer_id.clone(), + old_pub_key = ?old_pub_key, + new_pub_key = %new_peer_id.pub_key, + "Updating peer identity after inbound message" ); - self.connections.insert(new_peer_id, channel); + // Remove old reverse lookup if it exists + if let Some(old_key) = old_pub_key { + self.addr_by_pub_key.remove(&old_key); + // Update ring with old PeerId -> new PeerId + let old_peer = PeerId::new(remote_addr, old_key); + self.bridge.op_manager.ring.update_connection_identity( + &old_peer, + new_peer_id.clone(), + ); + } + // Update the entry's pub_key + if let Some(entry) = self.connections.get_mut(&remote_addr) { + entry.pub_key = Some(new_peer_id.pub_key.clone()); + } + // Add new reverse lookup + self.addr_by_pub_key.insert(new_peer_id.pub_key.clone(), remote_addr); } } } @@ -2021,18 +2095,24 @@ impl P2pConnManager { ?error, "peer_connection_listener reported transport closure" ); - if let Some(peer) = self - .connections - .keys() - .find_map(|k| (k.addr == remote_addr).then(|| k.clone())) - { + // Look up the connection directly by address + if let Some(entry) = self.connections.remove(&remote_addr) { + // Construct PeerId for prune_connection and DropConnection + let peer = if let Some(ref pub_key) = entry.pub_key { + PeerId::new(remote_addr, pub_key.clone()) + } else { + PeerId::new(remote_addr, (*self.bridge.op_manager.ring.connection_manager.pub_key).clone()) + }; + // Remove from reverse lookup + if let 
Some(pub_key) = entry.pub_key { + self.addr_by_pub_key.remove(&pub_key); + } tracing::debug!(self_peer = %self.bridge.op_manager.ring.connection_manager.pub_key, %peer, socket_addr = %remote_addr, conn_map_size = self.connections.len(), "[CONN_TRACK] REMOVE: TransportClosed - removing from connections HashMap"); self.bridge .op_manager .ring .prune_connection(peer.clone()) .await; - self.connections.remove(&peer); if let Err(error) = handshake_commands .send(HandshakeCommand::DropConnection { peer: peer.clone() }) .await From 2bfb7cc3631df5657a356aee617a0fafa0576d8a Mon Sep 17 00:00:00 2001 From: Ian Clarke Date: Thu, 27 Nov 2025 23:08:23 -0600 Subject: [PATCH 10/32] refactor(routing): thread source_addr through call chain, remove rewrite_sender_addr MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Phase 4 of #2164 - Connection-based routing refactor. Key changes: - Add InboundMessage wrapper in message.rs for carrying source address - Add target_addr: Option to OperationResult for explicit routing - Thread source_addr parameter through the message processing call chain: p2p_protoc::handle_inbound_message → process_message_decoupled → handle_pure_network_message → handle_pure_network_message_v1 - Remove rewrite_sender_addr() methods from all message types (GetMsg, PutMsg, SubscribeMsg, UpdateMsg, NetMessage, NetMessageV1) This replaces the old NAT workaround (mutating message contents to fix sender addresses) with explicit address passing. The source address from the transport layer is now available for operations to use for routing decisions. Note: InboundMessage and source_addr are prepared but not yet fully utilized - operations still use embedded message addresses. Next steps will remove address fields from wire messages and have operations use source_addr for upstream routing. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --- crates/core/src/message.rs | 73 ++++++++++++------- crates/core/src/node/mod.rs | 5 ++ .../src/node/network_bridge/p2p_protoc.rs | 25 ++++--- crates/core/src/operations/connect.rs | 1 + crates/core/src/operations/get.rs | 18 +---- crates/core/src/operations/mod.rs | 28 ++++--- crates/core/src/operations/put.rs | 35 +-------- crates/core/src/operations/subscribe.rs | 23 +----- crates/core/src/operations/update.rs | 27 +------ 9 files changed, 92 insertions(+), 143 deletions(-) diff --git a/crates/core/src/message.rs b/crates/core/src/message.rs index f37e364eb..df2ee5eda 100644 --- a/crates/core/src/message.rs +++ b/crates/core/src/message.rs @@ -268,20 +268,57 @@ pub(crate) trait MessageStats { fn requested_location(&self) -> Option; } -#[derive(Debug, Serialize, Deserialize, Clone)] -pub(crate) enum NetMessage { - V1(NetMessageV1), +/// Wrapper for inbound messages that carries the source address from the transport layer. +/// This separates routing concerns from message content - the source address is determined by +/// the network layer (from the packet), not embedded in the serialized message. +/// +/// Generic over the message type so it can wrap: +/// - `NetMessage` at the network layer (p2p_protoc.rs) +/// - Specific operation messages (GetMsg, PutMsg, etc.) at the operation layer +/// +/// Note: Currently unused but prepared for Phase 4 of #2164. +/// Will be used to thread source addresses to operations for routing. 
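// Editor's sketch (not part of the patch): once wired in, the transport layer
// tags each raw inbound message with its packet source, and later layers can
// narrow the payload without losing that address. `extract` is a hypothetical
// closure selecting an operation-specific message.
//
//     let inbound = InboundMessage::new(net_msg, remote_addr);
//     let source = inbound.source_addr;       // SocketAddr is Copy
//     let op_msg = inbound.map(extract).msg;  // payload narrowed, source kept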
+#[allow(dead_code)] +#[derive(Debug, Clone)] +pub struct InboundMessage { + /// The message content + pub msg: M, + /// The socket address this message was received from (from UDP packet source) + pub source_addr: SocketAddr, } -impl NetMessage { - /// Updates the sender's address in the message with the observed transport address. - /// This is essential for NAT traversal - peers behind NAT don't know their external - /// address, so we update it based on what the transport layer observed. - pub(crate) fn rewrite_sender_addr(&mut self, observed_addr: SocketAddr) { - match self { - NetMessage::V1(msg) => msg.rewrite_sender_addr(observed_addr), +#[allow(dead_code)] +impl InboundMessage { + /// Create a new inbound message wrapper + pub fn new(msg: M, source_addr: SocketAddr) -> Self { + Self { msg, source_addr } + } + + /// Transform the inner message while preserving source_addr + pub fn map(self, f: impl FnOnce(M) -> N) -> InboundMessage { + InboundMessage { + msg: f(self.msg), + source_addr: self.source_addr, } } + + /// Get a reference to the inner message + pub fn inner(&self) -> &M { + &self.msg + } +} + +#[allow(dead_code)] +impl InboundMessage { + /// Get the transaction ID from the wrapped network message + pub fn id(&self) -> &Transaction { + self.msg.id() + } +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub(crate) enum NetMessage { + V1(NetMessageV1), } #[derive(Debug, Serialize, Deserialize, Clone)] @@ -299,22 +336,6 @@ pub(crate) enum NetMessageV1 { Aborted(Transaction), } -impl NetMessageV1 { - /// Updates the sender's address in the message with the observed transport address. - fn rewrite_sender_addr(&mut self, observed_addr: SocketAddr) { - match self { - NetMessageV1::Put(msg) => msg.rewrite_sender_addr(observed_addr), - NetMessageV1::Get(msg) => msg.rewrite_sender_addr(observed_addr), - NetMessageV1::Subscribe(msg) => msg.rewrite_sender_addr(observed_addr), - NetMessageV1::Update(msg) => msg.rewrite_sender_addr(observed_addr), - // Connect messages are handled separately (they use observed_addr field) - NetMessageV1::Connect(_) => {} - // These don't have sender addresses to rewrite - NetMessageV1::Unsubscribed { .. 
} | NetMessageV1::Aborted(_) => {} - } - } -} - trait Versioned { fn version(&self) -> semver::Version; } diff --git a/crates/core/src/node/mod.rs b/crates/core/src/node/mod.rs index 187f3e224..500c2974c 100644 --- a/crates/core/src/node/mod.rs +++ b/crates/core/src/node/mod.rs @@ -620,6 +620,7 @@ pub(super) async fn process_message( #[allow(clippy::too_many_arguments)] pub(crate) async fn process_message_decoupled( msg: NetMessage, + source_addr: Option, op_manager: Arc, conn_manager: CB, mut event_listener: Box, @@ -634,6 +635,7 @@ pub(crate) async fn process_message_decoupled( // Pure network message processing - no client types involved let op_result = handle_pure_network_message( msg, + source_addr, op_manager.clone(), conn_manager, event_listener.as_mut(), @@ -679,6 +681,7 @@ pub(crate) async fn process_message_decoupled( #[allow(clippy::too_many_arguments)] async fn handle_pure_network_message( msg: NetMessage, + source_addr: Option, op_manager: Arc, conn_manager: CB, event_listener: &mut dyn NetEventRegister, @@ -691,6 +694,7 @@ where NetMessage::V1(msg_v1) => { handle_pure_network_message_v1( msg_v1, + source_addr, op_manager, conn_manager, event_listener, @@ -860,6 +864,7 @@ async fn process_message_v1( #[allow(clippy::too_many_arguments)] async fn handle_pure_network_message_v1( msg: NetMessageV1, + _source_addr: Option, op_manager: Arc, mut conn_manager: CB, event_listener: &mut dyn NetEventRegister, diff --git a/crates/core/src/node/network_bridge/p2p_protoc.rs b/crates/core/src/node/network_bridge/p2p_protoc.rs index ab96907e1..7390e6b3a 100644 --- a/crates/core/src/node/network_bridge/p2p_protoc.rs +++ b/crates/core/src/node/network_bridge/p2p_protoc.rs @@ -384,15 +384,10 @@ impl P2pConnManager { payload.observed_addr = Some(remote_addr); } } - // Rewrite sender addresses in all inbound messages with the observed - // transport address. This is essential for NAT traversal - peers behind - // NAT don't know their external address, so we update it based on what - // the transport layer observed. Without this, responses would be sent - // to the wrong address (e.g., 127.0.0.1 instead of the real NAT address). - if let Some(remote_addr) = remote { - msg.rewrite_sender_addr(remote_addr); - } - ctx.handle_inbound_message(msg, &op_manager, &mut state) + // Pass the source address through to operations for routing. + // This replaces the old rewrite_sender_addr hack - instead of mutating + // message contents, we pass the observed transport address separately. + ctx.handle_inbound_message(msg, remote, &op_manager, &mut state) .await?; } ConnEvent::OutboundMessage(NetMessage::V1(NetMessageV1::Aborted(tx))) => { @@ -423,8 +418,8 @@ impl P2pConnManager { self_peer = %self_peer_id, "BUG: OutboundMessage targets self! 
This indicates a routing logic error - messages should not reach OutboundMessage handler if they target self" ); - // Convert to InboundMessage and process locally - ctx.handle_inbound_message(msg, &op_manager, &mut state) + // Convert to InboundMessage and process locally (no remote source) + ctx.handle_inbound_message(msg, None, &op_manager, &mut state) .await?; continue; } @@ -1267,6 +1262,7 @@ impl P2pConnManager { async fn handle_inbound_message( &self, msg: NetMessage, + source_addr: Option, op_manager: &Arc, state: &mut EventListenerState, ) -> anyhow::Result<()> { @@ -1274,6 +1270,7 @@ impl P2pConnManager { tracing::debug!( %tx, tx_type = ?tx.transaction_type(), + ?source_addr, "Handling inbound NetMessage at event loop" ); match msg { @@ -1281,7 +1278,8 @@ impl P2pConnManager { handle_aborted_op(tx, op_manager, &self.gateways).await?; } msg => { - self.process_message(msg, op_manager, None, state).await; + self.process_message(msg, source_addr, op_manager, None, state) + .await; } } Ok(()) @@ -1290,6 +1288,7 @@ impl P2pConnManager { async fn process_message( &self, msg: NetMessage, + source_addr: Option, op_manager: &Arc, executor_callback_opt: Option>, state: &mut EventListenerState, @@ -1298,6 +1297,7 @@ impl P2pConnManager { tx = %msg.id(), tx_type = ?msg.id().transaction_type(), msg_type = %msg, + ?source_addr, peer = %op_manager.ring.connection_manager.get_peer_key().unwrap(), "process_message called - processing network message" ); @@ -1325,6 +1325,7 @@ impl P2pConnManager { GlobalExecutor::spawn( process_message_decoupled( msg, + source_addr, op_manager.clone(), self.bridge.clone(), self.event_listener.trait_clone(), diff --git a/crates/core/src/operations/connect.rs b/crates/core/src/operations/connect.rs index 59425d13b..e4423b7ca 100644 --- a/crates/core/src/operations/connect.rs +++ b/crates/core/src/operations/connect.rs @@ -992,6 +992,7 @@ fn store_operation_state_with_msg(op: &mut ConnectOp, msg: Option) - let state_clone = op.state.clone(); OperationResult { return_msg: msg.map(|m| NetMessage::V1(NetMessageV1::Connect(m))), + target_addr: None, state: state_clone.map(|state| { OpEnum::Connect(Box::new(ConnectOp { id: op.id, diff --git a/crates/core/src/operations/get.rs b/crates/core/src/operations/get.rs index 3859024e6..9eb1b0da3 100644 --- a/crates/core/src/operations/get.rs +++ b/crates/core/src/operations/get.rs @@ -1336,6 +1336,7 @@ fn build_op_result( }); Ok(OperationResult { return_msg: msg.map(NetMessage::from), + target_addr: None, state: output_op.map(OpEnum::Get), }) } @@ -1528,23 +1529,6 @@ mod messages { Self::ReturnGet { sender, .. } => Some(sender), } } - - /// Updates sender addresses with the observed transport address. - /// This is essential for NAT traversal - peers behind NAT don't know their external - /// address, so we rewrite it based on what the transport layer observed. - pub(crate) fn rewrite_sender_addr(&mut self, observed_addr: std::net::SocketAddr) { - match self { - Self::RequestGet { sender, .. } => { - sender.set_addr(observed_addr); - } - Self::SeekNode { sender, .. } => { - sender.set_addr(observed_addr); - } - Self::ReturnGet { sender, .. 
} => { - sender.set_addr(observed_addr); - } - } - } } impl Display for GetMsg { diff --git a/crates/core/src/operations/mod.rs b/crates/core/src/operations/mod.rs index 18626b344..17181d475 100644 --- a/crates/core/src/operations/mod.rs +++ b/crates/core/src/operations/mod.rs @@ -6,6 +6,8 @@ use freenet_stdlib::prelude::ContractKey; use futures::Future; use tokio::sync::mpsc::error::SendError; +use std::net::SocketAddr; + use crate::{ client_events::HostResult, contract::{ContractError, ExecutorError}, @@ -48,6 +50,9 @@ where pub(crate) struct OperationResult { /// Inhabited if there is a message to return to the other peer. pub return_msg: Option, + /// Where to send the return message. Required if return_msg is Some. + /// This replaces the old pattern of embedding target in the message itself. + pub target_addr: Option, /// None if the operation has been completed. pub state: Option, } @@ -104,6 +109,7 @@ where } Ok(OperationResult { return_msg: None, + target_addr: _, state: Some(final_state), }) if final_state.finalized() => { if op_manager.failed_parents().remove(&tx_id).is_some() { @@ -137,23 +143,24 @@ where } Ok(OperationResult { return_msg: Some(msg), + target_addr, state: Some(updated_state), }) => { if updated_state.finalized() { let id = *msg.id(); tracing::debug!(%id, "operation finalized with outgoing message"); op_manager.completed(id); - if let Some(target) = msg.target() { - tracing::debug!(%id, %target, "sending final message to target"); - network_bridge.send(target.addr(), msg).await?; + if let Some(target) = target_addr { + tracing::debug!(%id, ?target, "sending final message to target"); + network_bridge.send(target, msg).await?; } return Ok(Some(updated_state)); } else { let id = *msg.id(); tracing::debug!(%id, "operation in progress"); - if let Some(target) = msg.target() { - tracing::debug!(%id, %target, "sending updated op state"); - network_bridge.send(target.addr(), msg).await?; + if let Some(target) = target_addr { + tracing::debug!(%id, ?target, "sending updated op state"); + network_bridge.send(target, msg).await?; op_manager.push(id, updated_state).await?; } else { tracing::debug!(%id, "queueing op state for local processing"); @@ -174,6 +181,7 @@ where Ok(OperationResult { return_msg: None, + target_addr: _, state: Some(updated_state), }) => { let id = *updated_state.id(); @@ -181,17 +189,19 @@ where } Ok(OperationResult { return_msg: Some(msg), + target_addr, state: None, }) => { op_manager.completed(tx_id); - if let Some(target) = msg.target() { - tracing::debug!(%tx_id, target=%target.peer(), "sending back message to target"); - network_bridge.send(target.addr(), msg).await?; + if let Some(target) = target_addr { + tracing::debug!(%tx_id, ?target, "sending back message to target"); + network_bridge.send(target, msg).await?; } } Ok(OperationResult { return_msg: None, + target_addr: _, state: None, }) => { op_manager.completed(tx_id); diff --git a/crates/core/src/operations/put.rs b/crates/core/src/operations/put.rs index 20e92f802..ffb8ad2b9 100644 --- a/crates/core/src/operations/put.rs +++ b/crates/core/src/operations/put.rs @@ -771,6 +771,7 @@ impl Operation for PutOp { ); return Ok(OperationResult { return_msg: None, + target_addr: None, state: None, }); } @@ -940,6 +941,7 @@ fn build_op_result( }); Ok(OperationResult { return_msg: msg.map(NetMessage::from), + target_addr: None, state: output_op.map(OpEnum::Put), }) } @@ -1638,39 +1640,6 @@ mod messages { _ => None, } } - - /// Updates sender/origin addresses with the observed transport address. 
- /// This is essential for NAT traversal - peers behind NAT don't know their external - /// address, so we rewrite it based on what the transport layer observed. - pub(crate) fn rewrite_sender_addr(&mut self, observed_addr: std::net::SocketAddr) { - match self { - Self::RequestPut { sender, origin, .. } => { - sender.set_addr(observed_addr); - origin.set_addr(observed_addr); - } - Self::SeekNode { sender, origin, .. } => { - sender.set_addr(observed_addr); - origin.set_addr(observed_addr); - } - Self::PutForward { sender, origin, .. } => { - sender.set_addr(observed_addr); - origin.set_addr(observed_addr); - } - Self::SuccessfulPut { sender, origin, .. } => { - sender.set_addr(observed_addr); - origin.set_addr(observed_addr); - } - Self::Broadcasting { sender, origin, .. } => { - sender.set_addr(observed_addr); - origin.set_addr(observed_addr); - } - Self::BroadcastTo { sender, origin, .. } => { - sender.set_addr(observed_addr); - origin.set_addr(observed_addr); - } - Self::AwaitPut { .. } => {} - } - } } impl Display for PutMsg { diff --git a/crates/core/src/operations/subscribe.rs b/crates/core/src/operations/subscribe.rs index 4a702acbc..1a237f0ba 100644 --- a/crates/core/src/operations/subscribe.rs +++ b/crates/core/src/operations/subscribe.rs @@ -431,6 +431,7 @@ impl Operation for SubscribeOp { target: subscriber.clone(), subscribed: false, })), + target_addr: None, state: None, }); } @@ -529,6 +530,7 @@ impl Operation for SubscribeOp { sender: this_peer.clone(), target: subscriber.clone(), })), + target_addr: None, state: None, } }; @@ -892,6 +894,7 @@ fn build_op_result( }); Ok(OperationResult { return_msg: msg.map(NetMessage::from), + target_addr: None, state: output_op.map(OpEnum::Subscribe), }) } @@ -976,26 +979,6 @@ mod messages { _ => None, } } - - /// Updates subscriber/sender addresses with the observed transport address. - /// This is essential for NAT traversal - peers behind NAT don't know their external - /// address, so we rewrite it based on what the transport layer observed. - pub(crate) fn rewrite_sender_addr(&mut self, observed_addr: std::net::SocketAddr) { - match self { - Self::FetchRouting { .. } => { - // No sender field to rewrite - } - Self::RequestSub { subscriber, .. } => { - subscriber.set_addr(observed_addr); - } - Self::SeekNode { subscriber, .. } => { - subscriber.set_addr(observed_addr); - } - Self::ReturnSub { sender, .. } => { - sender.set_addr(observed_addr); - } - } - } } impl Display for SubscribeMsg { diff --git a/crates/core/src/operations/update.rs b/crates/core/src/operations/update.rs index 8d4ca50e0..c56f82080 100644 --- a/crates/core/src/operations/update.rs +++ b/crates/core/src/operations/update.rs @@ -765,6 +765,7 @@ fn build_op_result( let state = output_op.map(OpEnum::Update); Ok(OperationResult { return_msg: return_msg.map(NetMessage::from), + target_addr: None, state, }) } @@ -1312,32 +1313,6 @@ mod messages { _ => None, } } - - /// Updates sender addresses with the observed transport address. - /// This is essential for NAT traversal - peers behind NAT don't know their external - /// address, so we rewrite it based on what the transport layer observed. - pub(crate) fn rewrite_sender_addr(&mut self, observed_addr: std::net::SocketAddr) { - match self { - Self::RequestUpdate { sender, .. } => { - sender.set_addr(observed_addr); - } - Self::AwaitUpdate { .. } => { - // No sender field to rewrite - } - Self::SeekNode { sender, .. } => { - sender.set_addr(observed_addr); - } - Self::Broadcasting { - sender, upstream, .. 
- } => { - sender.set_addr(observed_addr); - upstream.set_addr(observed_addr); - } - Self::BroadcastTo { sender, .. } => { - sender.set_addr(observed_addr); - } - } - } } impl Display for UpdateMsg { From f8cfabed0907f744002a8333fbafc07f5c8161d5 Mon Sep 17 00:00:00 2001 From: Ian Clarke Date: Thu, 27 Nov 2025 23:17:44 -0600 Subject: [PATCH 11/32] refactor(operations): add source_addr to Operation trait and handle_op_request MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Add source_addr parameter to Operation::load_or_init and process_message - Update handle_op_request to accept and pass source_addr to operations - Add target_addr field to OperationResult (prepared for explicit routing) - Add #[allow(dead_code)] annotations for fields/methods prepared for Phase 4 Part of #2164 connection-based routing refactor. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --- crates/core/src/node/mod.rs | 67 ++++++++++++++++++------- crates/core/src/operations/connect.rs | 7 ++- crates/core/src/operations/get.rs | 13 ++--- crates/core/src/operations/mod.rs | 30 ++++++----- crates/core/src/operations/put.rs | 15 +++--- crates/core/src/operations/subscribe.rs | 11 ++-- crates/core/src/operations/update.rs | 14 +++--- 7 files changed, 98 insertions(+), 59 deletions(-) diff --git a/crates/core/src/node/mod.rs b/crates/core/src/node/mod.rs index 500c2974c..d0c1e2cab 100644 --- a/crates/core/src/node/mod.rs +++ b/crates/core/src/node/mod.rs @@ -732,8 +732,9 @@ async fn process_message_v1( transaction = %msg.id(), tx_type = %msg.id().transaction_type() ); + // Legacy path - no source_addr available let op_result = - handle_op_request::(&op_manager, &mut conn_manager, op) + handle_op_request::(&op_manager, &mut conn_manager, op, None) .instrument(span) .await; @@ -748,8 +749,10 @@ async fn process_message_v1( .await; } NetMessageV1::Put(ref op) => { + // Legacy path - no source_addr available let op_result = - handle_op_request::(&op_manager, &mut conn_manager, op).await; + handle_op_request::(&op_manager, &mut conn_manager, op, None) + .await; if is_operation_completed(&op_result) { if let Some(ref op_execution_callback) = pending_op_result { @@ -774,8 +777,10 @@ async fn process_message_v1( .await; } NetMessageV1::Get(ref op) => { + // Legacy path - no source_addr available let op_result = - handle_op_request::(&op_manager, &mut conn_manager, op).await; + handle_op_request::(&op_manager, &mut conn_manager, op, None) + .await; if is_operation_completed(&op_result) { if let Some(ref op_execution_callback) = pending_op_result { let tx_id = *op.id(); @@ -796,10 +801,12 @@ async fn process_message_v1( .await; } NetMessageV1::Subscribe(ref op) => { + // Legacy path - no source_addr available let op_result = handle_op_request::( &op_manager, &mut conn_manager, op, + None, ) .await; if is_operation_completed(&op_result) { @@ -822,9 +829,14 @@ async fn process_message_v1( .await; } NetMessageV1::Update(ref op) => { - let op_result = - handle_op_request::(&op_manager, &mut conn_manager, op) - .await; + // Legacy path - no source_addr available + let op_result = handle_op_request::( + &op_manager, + &mut conn_manager, + op, + None, + ) + .await; if is_operation_completed(&op_result) { if let Some(ref op_execution_callback) = pending_op_result { let tx_id = *op.id(); @@ -864,7 +876,7 @@ async fn process_message_v1( #[allow(clippy::too_many_arguments)] async fn handle_pure_network_message_v1( msg: NetMessageV1, - _source_addr: Option, + 
source_addr: Option, op_manager: Arc, mut conn_manager: CB, event_listener: &mut dyn NetEventRegister, @@ -892,10 +904,14 @@ where transaction = %msg.id(), tx_type = %msg.id().transaction_type() ); - let op_result = - handle_op_request::(&op_manager, &mut conn_manager, op) - .instrument(span) - .await; + let op_result = handle_op_request::( + &op_manager, + &mut conn_manager, + op, + source_addr, + ) + .instrument(span) + .await; if let Err(OpError::OpNotAvailable(state)) = &op_result { match state { @@ -924,8 +940,13 @@ where tx = %op.id(), "handle_pure_network_message_v1: Processing PUT message" ); - let op_result = - handle_op_request::(&op_manager, &mut conn_manager, op).await; + let op_result = handle_op_request::( + &op_manager, + &mut conn_manager, + op, + source_addr, + ) + .await; tracing::debug!( tx = %op.id(), op_result_ok = op_result.is_ok(), @@ -966,8 +987,13 @@ where .await; } NetMessageV1::Get(ref op) => { - let op_result = - handle_op_request::(&op_manager, &mut conn_manager, op).await; + let op_result = handle_op_request::( + &op_manager, + &mut conn_manager, + op, + source_addr, + ) + .await; // Handle pending operation results (network concern) if is_operation_completed(&op_result) { @@ -1003,9 +1029,13 @@ where .await; } NetMessageV1::Update(ref op) => { - let op_result = - handle_op_request::(&op_manager, &mut conn_manager, op) - .await; + let op_result = handle_op_request::( + &op_manager, + &mut conn_manager, + op, + source_addr, + ) + .await; if let Err(OpError::OpNotAvailable(state)) = &op_result { match state { @@ -1034,6 +1064,7 @@ where &op_manager, &mut conn_manager, op, + source_addr, ) .await; diff --git a/crates/core/src/operations/connect.rs b/crates/core/src/operations/connect.rs index e4423b7ca..7299dfc2b 100644 --- a/crates/core/src/operations/connect.rs +++ b/crates/core/src/operations/connect.rs @@ -113,6 +113,7 @@ impl fmt::Display for ConnectMsg { } impl ConnectMsg { + #[allow(dead_code)] pub fn sender(&self) -> Option { match self { ConnectMsg::Response { sender, .. 
} => Some(sender.peer()), @@ -751,12 +752,13 @@ impl Operation for ConnectOp { async fn load_or_init<'a>( op_manager: &'a OpManager, msg: &'a Self::Message, + source_addr: Option, ) -> Result, OpError> { let tx = *msg.id(); match op_manager.pop(msg.id()) { Ok(Some(OpEnum::Connect(op))) => Ok(OpInitialization { op: *op, - sender: msg.sender(), + source_addr, }), Ok(Some(other)) => { op_manager.push(tx, other).await?; @@ -775,7 +777,7 @@ impl Operation for ConnectOp { return Err(OpError::OpNotPresent(tx)); } }; - Ok(OpInitialization { op, sender: None }) + Ok(OpInitialization { op, source_addr }) } Err(err) => Err(err.into()), } @@ -786,6 +788,7 @@ impl Operation for ConnectOp { network_bridge: &'a mut NB, op_manager: &'a OpManager, msg: &'a Self::Message, + _source_addr: Option, ) -> std::pin::Pin< Box> + Send + 'a>, > { diff --git a/crates/core/src/operations/get.rs b/crates/core/src/operations/get.rs index 9eb1b0da3..017ed2827 100644 --- a/crates/core/src/operations/get.rs +++ b/crates/core/src/operations/get.rs @@ -445,15 +445,15 @@ impl Operation for GetOp { async fn load_or_init<'a>( op_manager: &'a OpManager, msg: &'a Self::Message, + source_addr: Option, ) -> Result, OpError> { - let mut sender: Option = None; - if let Some(peer_key_loc) = msg.sender().cloned() { - sender = Some(peer_key_loc.peer()); - }; let tx = *msg.id(); match op_manager.pop(msg.id()) { Ok(Some(OpEnum::Get(get_op))) => { - Ok(OpInitialization { op: get_op, sender }) + Ok(OpInitialization { + op: get_op, + source_addr, + }) // was an existing operation, other peer messaged back } Ok(Some(op)) => { @@ -470,7 +470,7 @@ impl Operation for GetOp { result: None, stats: None, // don't care about stats in target peers }, - sender, + source_addr, }) } Err(err) => Err(err.into()), @@ -486,6 +486,7 @@ impl Operation for GetOp { _conn_manager: &'a mut NB, op_manager: &'a OpManager, input: &'a Self::Message, + _source_addr: Option, ) -> Pin> + Send + 'a>> { Box::pin(async move { #[allow(unused_assignments)] diff --git a/crates/core/src/operations/mod.rs b/crates/core/src/operations/mod.rs index 17181d475..5244a1e03 100644 --- a/crates/core/src/operations/mod.rs +++ b/crates/core/src/operations/mod.rs @@ -12,7 +12,7 @@ use crate::{ client_events::HostResult, contract::{ContractError, ExecutorError}, message::{InnerMessage, MessageStats, NetMessage, NetMessageV1, Transaction, TransactionType}, - node::{ConnectionError, NetworkBridge, OpManager, OpNotAvailable, PeerId}, + node::{ConnectionError, NetworkBridge, OpManager, OpNotAvailable}, ring::{Location, PeerKeyLocation, RingError}, }; @@ -33,6 +33,7 @@ where fn load_or_init<'a>( op_manager: &'a OpManager, msg: &'a Self::Message, + source_addr: Option, ) -> impl Future, OpError>> + 'a; fn id(&self) -> &Transaction; @@ -43,7 +44,7 @@ where conn_manager: &'a mut CB, op_manager: &'a OpManager, input: &'a Self::Message, - // client_id: Option, + source_addr: Option, ) -> Pin> + Send + 'a>>; } @@ -58,28 +59,33 @@ pub(crate) struct OperationResult { } pub(crate) struct OpInitialization { - sender: Option, - op: Op, + /// The source address of the peer that sent this message. + /// Used for sending error responses (Aborted) and as upstream_addr. + /// Note: Currently unused but prepared for Phase 4 of #2164. 
+ #[allow(dead_code)] + pub source_addr: Option, + pub op: Op, } pub(crate) async fn handle_op_request( op_manager: &OpManager, network_bridge: &mut NB, msg: &Op::Message, + source_addr: Option, ) -> Result, OpError> where Op: Operation, NB: NetworkBridge, { - let sender; let tx = *msg.id(); let result = { - let OpInitialization { sender: s, op } = Op::load_or_init(op_manager, msg).await?; - sender = s; - op.process_message(network_bridge, op_manager, msg).await + let OpInitialization { source_addr: _, op } = + Op::load_or_init(op_manager, msg, source_addr).await?; + op.process_message(network_bridge, op_manager, msg, source_addr) + .await }; - handle_op_result(op_manager, network_bridge, result, tx, sender).await + handle_op_result(op_manager, network_bridge, result, tx, source_addr).await } #[inline(always)] @@ -88,7 +94,7 @@ async fn handle_op_result( network_bridge: &mut CB, result: Result, tx_id: Transaction, - sender: Option, + source_addr: Option, ) -> Result, OpError> where CB: NetworkBridge, @@ -100,9 +106,9 @@ where return Ok(None); } Err(err) => { - if let Some(sender) = sender { + if let Some(addr) = source_addr { network_bridge - .send(sender.addr, NetMessage::V1(NetMessageV1::Aborted(tx_id))) + .send(addr, NetMessage::V1(NetMessageV1::Aborted(tx_id))) .await?; } return Err(err); diff --git a/crates/core/src/operations/put.rs b/crates/core/src/operations/put.rs index ffb8ad2b9..7ca7b9939 100644 --- a/crates/core/src/operations/put.rs +++ b/crates/core/src/operations/put.rs @@ -90,12 +90,8 @@ impl Operation for PutOp { async fn load_or_init<'a>( op_manager: &'a OpManager, msg: &'a Self::Message, + source_addr: Option, ) -> Result, OpError> { - let mut sender: Option = None; - if let Some(peer_key_loc) = msg.sender().cloned() { - sender = Some(peer_key_loc.peer()); - }; - let tx = *msg.id(); tracing::debug!( tx = %tx, @@ -111,7 +107,10 @@ impl Operation for PutOp { state = %put_op.state.as_ref().map(|s| format!("{:?}", s)).unwrap_or_else(|| "None".to_string()), "PutOp::load_or_init: Found existing PUT operation" ); - Ok(OpInitialization { op: put_op, sender }) + Ok(OpInitialization { + op: put_op, + source_addr, + }) } Ok(Some(op)) => { tracing::warn!( @@ -132,7 +131,7 @@ impl Operation for PutOp { state: Some(PutState::ReceivedRequest), id: tx, }, - sender, + source_addr, }) } Err(err) => { @@ -155,6 +154,7 @@ impl Operation for PutOp { conn_manager: &'a mut NB, op_manager: &'a OpManager, input: &'a Self::Message, + _source_addr: Option, ) -> Pin> + Send + 'a>> { Box::pin(async move { let return_msg; @@ -1633,6 +1633,7 @@ mod messages { } impl PutMsg { + #[allow(dead_code)] pub fn sender(&self) -> Option<&PeerKeyLocation> { match self { Self::SeekNode { sender, .. 
} => Some(sender), diff --git a/crates/core/src/operations/subscribe.rs b/crates/core/src/operations/subscribe.rs index 1a237f0ba..55bd3bd80 100644 --- a/crates/core/src/operations/subscribe.rs +++ b/crates/core/src/operations/subscribe.rs @@ -325,11 +325,8 @@ impl Operation for SubscribeOp { async fn load_or_init<'a>( op_manager: &'a OpManager, msg: &'a Self::Message, + source_addr: Option, ) -> Result, OpError> { - let mut sender: Option = None; - if let Some(peer_key_loc) = msg.sender().cloned() { - sender = Some(peer_key_loc.peer()); - }; let id = *msg.id(); match op_manager.pop(msg.id()) { @@ -337,7 +334,7 @@ impl Operation for SubscribeOp { // was an existing operation, the other peer messaged back Ok(OpInitialization { op: subscribe_op, - sender, + source_addr, }) } Ok(Some(op)) => { @@ -351,7 +348,7 @@ impl Operation for SubscribeOp { state: Some(SubscribeState::ReceivedRequest), id, }, - sender, + source_addr, }) } Err(err) => Err(err.into()), @@ -367,6 +364,7 @@ impl Operation for SubscribeOp { _conn_manager: &'a mut NB, op_manager: &'a OpManager, input: &'a Self::Message, + _source_addr: Option, ) -> Pin> + Send + 'a>> { Box::pin(async move { let return_msg; @@ -973,6 +971,7 @@ mod messages { } impl SubscribeMsg { + #[allow(dead_code)] pub fn sender(&self) -> Option<&PeerKeyLocation> { match self { Self::ReturnSub { sender, .. } => Some(sender), diff --git a/crates/core/src/operations/update.rs b/crates/core/src/operations/update.rs index c56f82080..e7e8f218e 100644 --- a/crates/core/src/operations/update.rs +++ b/crates/core/src/operations/update.rs @@ -88,17 +88,14 @@ impl Operation for UpdateOp { async fn load_or_init<'a>( op_manager: &'a crate::node::OpManager, msg: &'a Self::Message, + source_addr: Option, ) -> Result, OpError> { - let mut sender: Option = None; - if let Some(peer_key_loc) = msg.sender().cloned() { - sender = Some(peer_key_loc.peer()); - }; let tx = *msg.id(); match op_manager.pop(msg.id()) { Ok(Some(OpEnum::Update(update_op))) => { Ok(OpInitialization { op: update_op, - sender, + source_addr, }) // was an existing operation, other peer messaged back } @@ -108,14 +105,14 @@ impl Operation for UpdateOp { } Ok(None) => { // new request to get a value for a contract, initialize the machine - tracing::debug!(tx = %tx, sender = ?sender, "initializing new op"); + tracing::debug!(tx = %tx, ?source_addr, "initializing new op"); Ok(OpInitialization { op: Self { state: Some(UpdateState::ReceivedRequest), id: tx, stats: None, // don't care about stats in target peers }, - sender, + source_addr, }) } Err(err) => Err(err.into()), @@ -131,7 +128,7 @@ impl Operation for UpdateOp { conn_manager: &'a mut NB, op_manager: &'a crate::node::OpManager, input: &'a Self::Message, - // _client_id: Option, + _source_addr: Option, ) -> std::pin::Pin< Box> + Send + 'a>, > { @@ -1305,6 +1302,7 @@ mod messages { } impl UpdateMsg { + #[allow(dead_code)] pub fn sender(&self) -> Option<&PeerKeyLocation> { match self { Self::RequestUpdate { sender, .. 
} => Some(sender), From ca24455c0f6dd325146d8dda3a2e95201a1b6968 Mon Sep 17 00:00:00 2001 From: Ian Clarke Date: Thu, 27 Nov 2025 23:26:13 -0600 Subject: [PATCH 12/32] refactor(get): add upstream_addr to GetOp for connection-based routing MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Add upstream_addr field to GetOp struct to track where requests came from - Store source_addr as upstream_addr when initializing GetOp from incoming messages - Thread upstream_addr through helper functions (build_op_result, try_forward_or_return) This is the foundation for connection-based routing where responses are sent back via the stored upstream address rather than embedded message fields. Part of #2164 connection-based routing refactor. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --- crates/core/src/operations/get.rs | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/crates/core/src/operations/get.rs b/crates/core/src/operations/get.rs index 017ed2827..c89104105 100644 --- a/crates/core/src/operations/get.rs +++ b/crates/core/src/operations/get.rs @@ -45,6 +45,7 @@ pub(crate) fn start_op(key: ContractKey, fetch_contract: bool, subscribe: bool) transfer_time: None, first_response_time: None, })), + upstream_addr: None, // Local operation, no upstream peer } } @@ -73,6 +74,7 @@ pub(crate) fn start_op_with_id( transfer_time: None, first_response_time: None, })), + upstream_addr: None, // Local operation, no upstream peer } } @@ -146,6 +148,7 @@ pub(crate) async fn request_get( contract, }), stats: get_op.stats, + upstream_addr: get_op.upstream_addr, }; op_manager.push(*id, OpEnum::Get(completed_op)).await?; @@ -230,6 +233,7 @@ pub(crate) async fn request_get( s.next_peer = Some(target); s }), + upstream_addr: get_op.upstream_addr, }; op_manager @@ -342,6 +346,9 @@ pub(crate) struct GetOp { state: Option, pub(super) result: Option, stats: Option>, + /// The address we received this operation's message from. + /// Used for connection-based routing: responses are sent back to this address. 
+ upstream_addr: Option, } impl GetOp { @@ -469,6 +476,7 @@ impl Operation for GetOp { id: tx, result: None, stats: None, // don't care about stats in target peers + upstream_addr: source_addr, // Connection-based routing: store who sent us this request }, source_addr, }) @@ -642,6 +650,7 @@ impl Operation for GetOp { new_skip_list, op_manager, stats, + self.upstream_addr, ) .await; } @@ -686,6 +695,7 @@ impl Operation for GetOp { }), None, stats, + self.upstream_addr, ); } @@ -797,6 +807,7 @@ impl Operation for GetOp { new_skip_list, op_manager, stats, + self.upstream_addr, ) .await; } @@ -1130,6 +1141,7 @@ impl Operation for GetOp { state: self.state, result: None, stats, + upstream_addr: self.upstream_addr, }), ) .await?; @@ -1317,7 +1329,14 @@ impl Operation for GetOp { } } - build_op_result(self.id, new_state, return_msg, result, stats) + build_op_result( + self.id, + new_state, + return_msg, + result, + stats, + self.upstream_addr, + ) }) } } @@ -1328,12 +1347,14 @@ fn build_op_result( msg: Option, result: Option, stats: Option>, + upstream_addr: Option, ) -> Result { let output_op = state.map(|state| GetOp { id, state: Some(state), result, stats, + upstream_addr, }); Ok(OperationResult { return_msg: msg.map(NetMessage::from), @@ -1342,6 +1363,7 @@ fn build_op_result( }) } +#[allow(clippy::too_many_arguments)] async fn try_forward_or_return( id: Transaction, key: ContractKey, @@ -1350,6 +1372,7 @@ async fn try_forward_or_return( skip_list: HashSet, op_manager: &OpManager, stats: Option>, + upstream_addr: Option, ) -> Result { tracing::warn!( tx = %id, @@ -1426,6 +1449,7 @@ async fn try_forward_or_return( }), None, stats, + upstream_addr, ) } else { tracing::debug!( @@ -1450,6 +1474,7 @@ async fn try_forward_or_return( }), None, stats, + upstream_addr, ) } } From 054cc7b18722c62c6d4144c4d5cb1bb85671b953 Mon Sep 17 00:00:00 2001 From: Ian Clarke Date: Thu, 27 Nov 2025 23:29:53 -0600 Subject: [PATCH 13/32] refactor(subscribe): add upstream_addr to SubscribeOp for connection-based routing MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Add upstream_addr field to SubscribeOp struct to track where requests came from - Store source_addr as upstream_addr when initializing SubscribeOp from incoming messages - Thread upstream_addr through build_op_result helper function - Update test to include the new field Part of #2164 connection-based routing refactor. 
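For reference, the routing pattern this series applies to each operation can be summarized with a small self-contained sketch. The `Op`/`OpResult` types below are simplified stand-ins invented for illustration, not this crate's real structs; they only show the data flow of storing the observed source address and echoing it back as the reply target:

    // Simplified stand-ins; the real types are SubscribeOp, GetOp, OperationResult, etc.
    use std::net::SocketAddr;

    struct Op {
        // Address the incoming request arrived from (None for locally started ops).
        upstream_addr: Option<SocketAddr>,
    }

    struct OpResult {
        // Where the reply should be routed; later patches wire this into target_addr.
        target_addr: Option<SocketAddr>,
    }

    fn load_or_init(source_addr: Option<SocketAddr>) -> Op {
        // Remember who sent us the request.
        Op { upstream_addr: source_addr }
    }

    fn build_op_result(op: &Op) -> OpResult {
        // Route the response back over the same connection it came in on.
        OpResult { target_addr: op.upstream_addr }
    }

    fn main() {
        let src: Option<SocketAddr> = "172.26.0.102:31337".parse().ok();
        let op = load_or_init(src);
        assert_eq!(build_op_result(&op).target_addr, src);
    }
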
🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --- crates/core/src/operations/subscribe.rs | 33 +++++++++++++++---- crates/core/src/operations/subscribe/tests.rs | 1 + 2 files changed, 28 insertions(+), 6 deletions(-) diff --git a/crates/core/src/operations/subscribe.rs b/crates/core/src/operations/subscribe.rs index 55bd3bd80..8ddbb9bc1 100644 --- a/crates/core/src/operations/subscribe.rs +++ b/crates/core/src/operations/subscribe.rs @@ -128,13 +128,21 @@ impl TryFrom for SubscribeResult { pub(crate) fn start_op(key: ContractKey) -> SubscribeOp { let id = Transaction::new::(); let state = Some(SubscribeState::PrepareRequest { id, key }); - SubscribeOp { id, state } + SubscribeOp { + id, + state, + upstream_addr: None, // Local operation, no upstream peer + } } /// Create a Subscribe operation with a specific transaction ID (for operation deduplication) pub(crate) fn start_op_with_id(key: ContractKey, id: Transaction) -> SubscribeOp { let state = Some(SubscribeState::PrepareRequest { id, key }); - SubscribeOp { id, state } + SubscribeOp { + id, + state, + upstream_addr: None, // Local operation, no upstream peer + } } /// Request to subscribe to value changes from a contract. @@ -244,6 +252,7 @@ pub(crate) async fn request_subscribe( let op = SubscribeOp { id: *id, state: new_state, + upstream_addr: sub_op.upstream_addr, }; op_manager .notify_op_change(NetMessage::from(msg), OpEnum::Subscribe(op)) @@ -290,6 +299,9 @@ async fn complete_local_subscription( pub(crate) struct SubscribeOp { pub id: Transaction, state: Option, + /// The address we received this operation's message from. + /// Used for connection-based routing: responses are sent back to this address. + upstream_addr: Option, } impl SubscribeOp { @@ -342,11 +354,12 @@ impl Operation for SubscribeOp { Err(OpError::OpNotPresent(id)) } Ok(None) => { - // new request to subcribe to a contract, initialize the machine + // new request to subscribe to a contract, initialize the machine Ok(OpInitialization { op: Self { state: Some(SubscribeState::ReceivedRequest), id, + upstream_addr: source_addr, // Connection-based routing: store who sent us this request }, source_addr, }) @@ -468,7 +481,7 @@ impl Operation for SubscribeOp { return Err(err); } - return build_op_result(self.id, None, None); + return build_op_result(self.id, None, None, self.upstream_addr); } let return_msg = SubscribeMsg::ReturnSub { @@ -479,7 +492,12 @@ impl Operation for SubscribeOp { subscribed: true, }; - return build_op_result(self.id, None, Some(return_msg)); + return build_op_result( + self.id, + None, + Some(return_msg), + self.upstream_addr, + ); } let mut skip = HashSet::new(); @@ -641,6 +659,7 @@ impl Operation for SubscribeOp { retries: *retries, }) .into(), + self.upstream_addr, ); } // After fetch attempt we should now have the contract locally. 
@@ -876,7 +895,7 @@ impl Operation for SubscribeOp { _ => return Err(OpError::UnexpectedOpState), } - build_op_result(self.id, new_state, return_msg) + build_op_result(self.id, new_state, return_msg, self.upstream_addr) }) } } @@ -885,10 +904,12 @@ fn build_op_result( id: Transaction, state: Option, msg: Option, + upstream_addr: Option, ) -> Result { let output_op = state.map(|state| SubscribeOp { id, state: Some(state), + upstream_addr, }); Ok(OperationResult { return_msg: msg.map(NetMessage::from), diff --git a/crates/core/src/operations/subscribe/tests.rs b/crates/core/src/operations/subscribe/tests.rs index abca9f655..f4bf849f1 100644 --- a/crates/core/src/operations/subscribe/tests.rs +++ b/crates/core/src/operations/subscribe/tests.rs @@ -485,6 +485,7 @@ async fn test_subscription_validates_k_closest_usage() { upstream_subscriber: None, current_hop: 5, }), + upstream_addr: None, }; // Verify skip list is maintained in state From 74f095fb04414ec3e20e3b87682bbf52914db25e Mon Sep 17 00:00:00 2001 From: Ian Clarke Date: Thu, 27 Nov 2025 23:36:05 -0600 Subject: [PATCH 14/32] refactor(operations): add upstream_addr to PutOp and UpdateOp MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Add upstream_addr field to PutOp and UpdateOp structs - Store source_addr as upstream_addr when initializing operations from incoming messages - Thread upstream_addr through helper functions (build_op_result, try_to_broadcast) This completes the upstream_addr addition to all operations (Get, Subscribe, Put, Update), establishing the foundation for connection-based routing where responses are sent back via the stored upstream address rather than embedded message fields. Part of #2164 connection-based routing refactor. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --- crates/core/src/operations/put.rs | 26 +++++++++++++++++++++++--- crates/core/src/operations/update.rs | 11 ++++++++++- 2 files changed, 33 insertions(+), 4 deletions(-) diff --git a/crates/core/src/operations/put.rs b/crates/core/src/operations/put.rs index 7ca7b9939..ffdfaf3ad 100644 --- a/crates/core/src/operations/put.rs +++ b/crates/core/src/operations/put.rs @@ -25,6 +25,9 @@ use crate::{ pub(crate) struct PutOp { pub id: Transaction, state: Option, + /// The address we received this operation's message from. + /// Used for connection-based routing: responses are sent back to this address. 
+ upstream_addr: Option, } impl PutOp { @@ -130,6 +133,7 @@ impl Operation for PutOp { op: Self { state: Some(PutState::ReceivedRequest), id: tx, + upstream_addr: source_addr, // Connection-based routing: store who sent us this request }, source_addr, }) @@ -457,6 +461,7 @@ impl Operation for PutOp { (broadcast_to, sender.clone()), key, (contract.clone(), value.clone()), + self.upstream_addr, ) .await { @@ -510,6 +515,7 @@ impl Operation for PutOp { (broadcast_to, sender.clone()), *key, (contract.clone(), updated_value), + self.upstream_addr, ) .await { @@ -895,6 +901,7 @@ impl Operation for PutOp { (broadcast_to, sender.clone()), key, (contract.clone(), new_value.clone()), + self.upstream_addr, ) .await { @@ -908,7 +915,7 @@ impl Operation for PutOp { _ => return Err(OpError::UnexpectedOpState), } - build_op_result(self.id, new_state, return_msg) + build_op_result(self.id, new_state, return_msg, self.upstream_addr) }) } } @@ -934,10 +941,12 @@ fn build_op_result( id: Transaction, state: Option, msg: Option, + upstream_addr: Option, ) -> Result { let output_op = state.map(|op| PutOp { id, state: Some(op), + upstream_addr, }); Ok(OperationResult { return_msg: msg.map(NetMessage::from), @@ -956,6 +965,7 @@ async fn try_to_broadcast( (broadcast_to, upstream): (Vec, PeerKeyLocation), key: ContractKey, (contract, new_value): (ContractContainer, WrappedState), + upstream_addr: Option, ) -> Result<(Option, Option), OpError> { let new_state; let return_msg; @@ -1041,6 +1051,7 @@ async fn try_to_broadcast( let op = PutOp { id, state: new_state, + upstream_addr, }; op_manager .notify_op_change(NetMessage::from(return_msg.unwrap()), OpEnum::Put(op)) @@ -1084,7 +1095,11 @@ pub(crate) fn start_op( subscribe, }); - PutOp { id, state } + PutOp { + id, + state, + upstream_addr: None, // Local operation, no upstream peer + } } /// Create a PUT operation with a specific transaction ID (for operation deduplication) @@ -1109,7 +1124,11 @@ pub(crate) fn start_op_with_id( subscribe, }); - PutOp { id, state } + PutOp { + id, + state, + upstream_addr: None, // Local operation, no upstream peer + } } #[derive(Debug)] @@ -1244,6 +1263,7 @@ pub(crate) async fn request_put(op_manager: &OpManager, mut put_op: PutOp) -> Re (broadcast_to, sender), key, (contract.clone(), updated_value), + put_op.upstream_addr, ) .await?; diff --git a/crates/core/src/operations/update.rs b/crates/core/src/operations/update.rs index e7e8f218e..f3f8f3196 100644 --- a/crates/core/src/operations/update.rs +++ b/crates/core/src/operations/update.rs @@ -18,6 +18,9 @@ pub(crate) struct UpdateOp { pub id: Transaction, pub(crate) state: Option, stats: Option, + /// The address we received this operation's message from. + /// Used for connection-based routing: responses are sent back to this address. 
+ upstream_addr: Option, } impl UpdateOp { @@ -111,6 +114,7 @@ impl Operation for UpdateOp { state: Some(UpdateState::ReceivedRequest), id: tx, stats: None, // don't care about stats in target peers + upstream_addr: source_addr, // Connection-based routing: store who sent us this request }, source_addr, }) @@ -622,7 +626,7 @@ impl Operation for UpdateOp { _ => return Err(OpError::UnexpectedOpState), } - build_op_result(self.id, new_state, return_msg, stats) + build_op_result(self.id, new_state, return_msg, stats, self.upstream_addr) }) } } @@ -753,11 +757,13 @@ fn build_op_result( state: Option, return_msg: Option, stats: Option, + upstream_addr: Option, ) -> Result { let output_op = state.map(|op| UpdateOp { id, state: Some(op), stats, + upstream_addr, }); let state = output_op.map(OpEnum::Update); Ok(OperationResult { @@ -909,6 +915,7 @@ pub(crate) fn start_op( id, state, stats: Some(UpdateStats { target: None }), + upstream_addr: None, // Local operation, no upstream peer } } @@ -933,6 +940,7 @@ pub(crate) fn start_op_with_id( id, state, stats: Some(UpdateStats { target: None }), + upstream_addr: None, // Local operation, no upstream peer } } @@ -1174,6 +1182,7 @@ async fn deliver_update_result( summary: summary.clone(), }), stats: None, + upstream_addr: None, // Terminal state, no routing needed }; let host_result = op.to_host_result(); From 784309aa8d2d6e8884890783adc7671ccf024446 Mon Sep 17 00:00:00 2001 From: Ian Clarke Date: Fri, 28 Nov 2025 20:06:21 -0600 Subject: [PATCH 15/32] fix: preserve target address through bridge event for NAT routing MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The handle_bridge_msg function was discarding the target address passed from P2pBridge::send(), using only the message for ConnEvent::OutboundMessage. The event loop then extracted the target from msg.target().addr(), which contains the peer's self-reported address rather than the NAT-translated transport address. This caused subscribe responses to be sent to wrong addresses in NAT scenarios (e.g., 127.0.0.1:31337 instead of 172.26.0.102:31337). Fix: - Add ConnEvent::OutboundMessageWithTarget variant with explicit target_addr - Modify handle_bridge_msg to use this variant with target.addr from the PeerId - Add handler that uses target_addr directly for connection lookup 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --- .../src/node/network_bridge/p2p_protoc.rs | 68 ++++++++++++++++++- 1 file changed, 66 insertions(+), 2 deletions(-) diff --git a/crates/core/src/node/network_bridge/p2p_protoc.rs b/crates/core/src/node/network_bridge/p2p_protoc.rs index 7390e6b3a..7d5729f51 100644 --- a/crates/core/src/node/network_bridge/p2p_protoc.rs +++ b/crates/core/src/node/network_bridge/p2p_protoc.rs @@ -599,6 +599,53 @@ impl P2pConnManager { } } } + ConnEvent::OutboundMessageWithTarget { target_addr, msg } => { + // This variant uses an explicit target address from OperationResult.target_addr, + // which is critical for NAT scenarios where the address in the message + // differs from the actual transport address we should send to. 
+ tracing::info!( + tx = %msg.id(), + msg_type = %msg, + target_addr = %target_addr, + msg_target = ?msg.target().map(|t| t.addr()), + "Sending outbound message with explicit target address (NAT routing)" + ); + + // Look up the connection using the explicit target address + let peer_connection = ctx.connections.get(&target_addr); + + match peer_connection { + Some(peer_connection) => { + if let Err(e) = + peer_connection.sender.send(Left(msg.clone())).await + { + tracing::error!( + tx = %msg.id(), + target_addr = %target_addr, + "Failed to send message to peer: {}", e + ); + } else { + tracing::info!( + tx = %msg.id(), + target_addr = %target_addr, + "Message successfully sent to peer connection via explicit address" + ); + } + } + None => { + // No existing connection - this is unexpected for NAT scenarios + // since we should have the connection from the original request + tracing::error!( + tx = %msg.id(), + target_addr = %target_addr, + msg_target = ?msg.target().map(|t| t.addr()), + connections = ?ctx.connections.keys().collect::>(), + "No connection found for explicit target address - NAT routing failed" + ); + ctx.bridge.op_manager.completed(*msg.id()); + } + } + } ConnEvent::TransportClosed { remote_addr, error } => { tracing::debug!( remote = %remote_addr, @@ -2211,8 +2258,19 @@ impl P2pConnManager { fn handle_bridge_msg(&self, msg: Option) -> EventResult { match msg { - Some(Left((_target, msg))) => { - EventResult::Event(ConnEvent::OutboundMessage(*msg).into()) + Some(Left((target, msg))) => { + // Use OutboundMessageWithTarget to preserve the target address from + // OperationResult.target_addr. This is critical for NAT scenarios where + // the address in the message differs from the actual transport address. + // The PeerId.addr contains the address that was used to look up the peer + // in P2pBridge::send(), which is the correct transport address. + EventResult::Event( + ConnEvent::OutboundMessageWithTarget { + target_addr: target.addr, + msg: *msg, + } + .into(), + ) } Some(Right(action)) => EventResult::Event(ConnEvent::NodeAction(action).into()), None => EventResult::Event(ConnEvent::ClosedChannel(ChannelCloseReason::Bridge).into()), @@ -2343,6 +2401,12 @@ enum EventResult { pub(super) enum ConnEvent { InboundMessage(IncomingMessage), OutboundMessage(NetMessage), + /// Outbound message with explicit target address from OperationResult.target_addr. + /// Used when the target address differs from what's in the message (NAT scenarios). + OutboundMessageWithTarget { + target_addr: SocketAddr, + msg: NetMessage, + }, NodeAction(NodeEvent), ClosedChannel(ChannelCloseReason), TransportClosed { From 15eb45152f701e109a24d5420cb8638c3be52af8 Mon Sep 17 00:00:00 2001 From: Ian Clarke Date: Sat, 29 Nov 2025 09:40:09 -0600 Subject: [PATCH 16/32] refactor: use ObservedAddr newtype for source_addr throughout MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Addresses Nacho's architectural feedback to avoid raw SocketAddr in protocol layer. Uses ObservedAddr newtype to wrap transport-layer addresses, making the address semantics explicit at the type level. 
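As a quick illustration of the boundary rule described above (wrap the packet source address when it enters the protocol layer, convert back to a raw SocketAddr only when handing it to the network layer), here is a minimal standalone sketch of the newtype; the authoritative definition is the one added to crates/core/src/transport/mod.rs in the diff below:

    use std::net::SocketAddr;

    // Standalone copy for illustration; mirrors the ObservedAddr added in this patch.
    #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
    struct ObservedAddr(SocketAddr);

    impl ObservedAddr {
        fn new(addr: SocketAddr) -> Self {
            Self(addr)
        }
        fn socket_addr(&self) -> SocketAddr {
            self.0
        }
    }

    fn main() {
        // Wrap at the transport boundary (e.g., the UDP packet source)...
        let observed = ObservedAddr::new("172.26.0.102:31337".parse().unwrap());
        // ...and unwrap only at the send boundary.
        let target_addr: SocketAddr = observed.socket_addr();
        println!("routing reply to {target_addr}");
    }
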
Changes: - Add ObservedAddr newtype in transport/mod.rs - Update Operation trait to use Option for source_addr - Update all operation implementations (connect, get, put, subscribe, update) - Update node/mod.rs and p2p_protoc.rs to use ObservedAddr - Wrap incoming source_addr in ObservedAddr::new() at transport boundary - Convert back to SocketAddr at network send boundaries The conversion to raw SocketAddr happens at transport boundaries: - build_op_result() converts for target_addr - network_bridge.send() calls use .socket_addr() 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --- crates/core/src/node/mod.rs | 8 ++--- .../src/node/network_bridge/p2p_protoc.rs | 17 ++++++---- crates/core/src/operations/connect.rs | 6 ++-- crates/core/src/operations/get.rs | 13 +++---- crates/core/src/operations/mod.rs | 20 ++++++----- crates/core/src/operations/put.rs | 13 +++---- crates/core/src/operations/subscribe.rs | 16 +++++---- crates/core/src/operations/update.rs | 11 +++--- crates/core/src/transport/mod.rs | 34 +++++++++++++++++++ 9 files changed, 92 insertions(+), 46 deletions(-) diff --git a/crates/core/src/node/mod.rs b/crates/core/src/node/mod.rs index d0c1e2cab..bf17154d4 100644 --- a/crates/core/src/node/mod.rs +++ b/crates/core/src/node/mod.rs @@ -61,7 +61,7 @@ use crate::operations::handle_op_request; pub(crate) use network_bridge::{ConnectionError, EventLoopNotificationsSender, NetworkBridge}; use crate::topology::rate::Rate; -use crate::transport::{TransportKeypair, TransportPublicKey}; +use crate::transport::{ObservedAddr, TransportKeypair, TransportPublicKey}; pub(crate) use op_state_manager::{OpManager, OpNotAvailable}; mod message_processor; @@ -620,7 +620,7 @@ pub(super) async fn process_message( #[allow(clippy::too_many_arguments)] pub(crate) async fn process_message_decoupled( msg: NetMessage, - source_addr: Option, + source_addr: Option, op_manager: Arc, conn_manager: CB, mut event_listener: Box, @@ -681,7 +681,7 @@ pub(crate) async fn process_message_decoupled( #[allow(clippy::too_many_arguments)] async fn handle_pure_network_message( msg: NetMessage, - source_addr: Option, + source_addr: Option, op_manager: Arc, conn_manager: CB, event_listener: &mut dyn NetEventRegister, @@ -876,7 +876,7 @@ async fn process_message_v1( #[allow(clippy::too_many_arguments)] async fn handle_pure_network_message_v1( msg: NetMessageV1, - source_addr: Option, + source_addr: Option, op_manager: Arc, mut conn_manager: CB, event_listener: &mut dyn NetEventRegister, diff --git a/crates/core/src/node/network_bridge/p2p_protoc.rs b/crates/core/src/node/network_bridge/p2p_protoc.rs index 7d5729f51..35c1fcc19 100644 --- a/crates/core/src/node/network_bridge/p2p_protoc.rs +++ b/crates/core/src/node/network_bridge/p2p_protoc.rs @@ -30,8 +30,8 @@ use crate::node::{MessageProcessor, PeerId}; use crate::operations::{connect::ConnectMsg, get::GetMsg, put::PutMsg, update::UpdateMsg}; use crate::ring::Location; use crate::transport::{ - create_connection_handler, OutboundConnectionHandler, PeerConnection, TransportError, - TransportKeypair, TransportPublicKey, + create_connection_handler, ObservedAddr, OutboundConnectionHandler, PeerConnection, + TransportError, TransportKeypair, TransportPublicKey, }; use crate::{ client_events::ClientId, @@ -387,8 +387,13 @@ impl P2pConnManager { // Pass the source address through to operations for routing. 
// This replaces the old rewrite_sender_addr hack - instead of mutating // message contents, we pass the observed transport address separately. - ctx.handle_inbound_message(msg, remote, &op_manager, &mut state) - .await?; + ctx.handle_inbound_message( + msg, + remote.map(ObservedAddr::new), + &op_manager, + &mut state, + ) + .await?; } ConnEvent::OutboundMessage(NetMessage::V1(NetMessageV1::Aborted(tx))) => { // TODO: handle aborted transaction as internal message @@ -1309,7 +1314,7 @@ impl P2pConnManager { async fn handle_inbound_message( &self, msg: NetMessage, - source_addr: Option, + source_addr: Option, op_manager: &Arc, state: &mut EventListenerState, ) -> anyhow::Result<()> { @@ -1335,7 +1340,7 @@ impl P2pConnManager { async fn process_message( &self, msg: NetMessage, - source_addr: Option, + source_addr: Option, op_manager: &Arc, executor_callback_opt: Option>, state: &mut EventListenerState, diff --git a/crates/core/src/operations/connect.rs b/crates/core/src/operations/connect.rs index 7299dfc2b..f7587772a 100644 --- a/crates/core/src/operations/connect.rs +++ b/crates/core/src/operations/connect.rs @@ -22,7 +22,7 @@ use crate::node::{IsOperationCompleted, NetworkBridge, OpManager, PeerId}; use crate::operations::{OpEnum, OpError, OpInitialization, OpOutcome, Operation, OperationResult}; use crate::ring::PeerKeyLocation; use crate::router::{EstimatorType, IsotonicEstimator, IsotonicEvent}; -use crate::transport::TransportKeypair; +use crate::transport::{ObservedAddr, TransportKeypair, TransportPublicKey}; use crate::util::{Backoff, Contains, IterExt}; use freenet_stdlib::client_api::HostResponse; @@ -752,7 +752,7 @@ impl Operation for ConnectOp { async fn load_or_init<'a>( op_manager: &'a OpManager, msg: &'a Self::Message, - source_addr: Option, + source_addr: Option, ) -> Result, OpError> { let tx = *msg.id(); match op_manager.pop(msg.id()) { @@ -788,7 +788,7 @@ impl Operation for ConnectOp { network_bridge: &'a mut NB, op_manager: &'a OpManager, msg: &'a Self::Message, - _source_addr: Option, + _source_addr: Option, ) -> std::pin::Pin< Box> + Send + 'a>, > { diff --git a/crates/core/src/operations/get.rs b/crates/core/src/operations/get.rs index c89104105..e3dc10002 100644 --- a/crates/core/src/operations/get.rs +++ b/crates/core/src/operations/get.rs @@ -13,6 +13,7 @@ use crate::{ node::{NetworkBridge, OpManager, PeerId}, operations::{OpInitialization, Operation}, ring::{Location, PeerKeyLocation, RingError}, + transport::ObservedAddr, }; use super::{OpEnum, OpError, OpOutcome, OperationResult}; @@ -348,7 +349,7 @@ pub(crate) struct GetOp { stats: Option>, /// The address we received this operation's message from. /// Used for connection-based routing: responses are sent back to this address. 
- upstream_addr: Option, + upstream_addr: Option, } impl GetOp { @@ -452,7 +453,7 @@ impl Operation for GetOp { async fn load_or_init<'a>( op_manager: &'a OpManager, msg: &'a Self::Message, - source_addr: Option, + source_addr: Option, ) -> Result, OpError> { let tx = *msg.id(); match op_manager.pop(msg.id()) { @@ -494,7 +495,7 @@ impl Operation for GetOp { _conn_manager: &'a mut NB, op_manager: &'a OpManager, input: &'a Self::Message, - _source_addr: Option, + _source_addr: Option, ) -> Pin> + Send + 'a>> { Box::pin(async move { #[allow(unused_assignments)] @@ -1347,7 +1348,7 @@ fn build_op_result( msg: Option, result: Option, stats: Option>, - upstream_addr: Option, + upstream_addr: Option, ) -> Result { let output_op = state.map(|state| GetOp { id, @@ -1358,7 +1359,7 @@ fn build_op_result( }); Ok(OperationResult { return_msg: msg.map(NetMessage::from), - target_addr: None, + target_addr: upstream_addr.map(|a| a.socket_addr()), state: output_op.map(OpEnum::Get), }) } @@ -1372,7 +1373,7 @@ async fn try_forward_or_return( skip_list: HashSet, op_manager: &OpManager, stats: Option>, - upstream_addr: Option, + upstream_addr: Option, ) -> Result { tracing::warn!( tx = %id, diff --git a/crates/core/src/operations/mod.rs b/crates/core/src/operations/mod.rs index 5244a1e03..b72e252f0 100644 --- a/crates/core/src/operations/mod.rs +++ b/crates/core/src/operations/mod.rs @@ -1,19 +1,18 @@ #[cfg(debug_assertions)] use std::backtrace::Backtrace as StdTrace; -use std::{pin::Pin, time::Duration}; +use std::{net::SocketAddr, pin::Pin, time::Duration}; use freenet_stdlib::prelude::ContractKey; use futures::Future; use tokio::sync::mpsc::error::SendError; -use std::net::SocketAddr; - use crate::{ client_events::HostResult, contract::{ContractError, ExecutorError}, message::{InnerMessage, MessageStats, NetMessage, NetMessageV1, Transaction, TransactionType}, node::{ConnectionError, NetworkBridge, OpManager, OpNotAvailable}, ring::{Location, PeerKeyLocation, RingError}, + transport::ObservedAddr, }; pub(crate) mod connect; @@ -33,7 +32,7 @@ where fn load_or_init<'a>( op_manager: &'a OpManager, msg: &'a Self::Message, - source_addr: Option, + source_addr: Option, ) -> impl Future, OpError>> + 'a; fn id(&self) -> &Transaction; @@ -44,7 +43,7 @@ where conn_manager: &'a mut CB, op_manager: &'a OpManager, input: &'a Self::Message, - source_addr: Option, + source_addr: Option, ) -> Pin> + Send + 'a>>; } @@ -63,7 +62,7 @@ pub(crate) struct OpInitialization { /// Used for sending error responses (Aborted) and as upstream_addr. /// Note: Currently unused but prepared for Phase 4 of #2164. 
#[allow(dead_code)] - pub source_addr: Option, + pub source_addr: Option, pub op: Op, } @@ -71,7 +70,7 @@ pub(crate) async fn handle_op_request( op_manager: &OpManager, network_bridge: &mut NB, msg: &Op::Message, - source_addr: Option, + source_addr: Option, ) -> Result, OpError> where Op: Operation, @@ -94,7 +93,7 @@ async fn handle_op_result( network_bridge: &mut CB, result: Result, tx_id: Transaction, - source_addr: Option, + source_addr: Option, ) -> Result, OpError> where CB: NetworkBridge, @@ -108,7 +107,10 @@ where Err(err) => { if let Some(addr) = source_addr { network_bridge - .send(addr, NetMessage::V1(NetMessageV1::Aborted(tx_id))) + .send( + addr.socket_addr(), + NetMessage::V1(NetMessageV1::Aborted(tx_id)), + ) .await?; } return Err(err); diff --git a/crates/core/src/operations/put.rs b/crates/core/src/operations/put.rs index ffdfaf3ad..07d55f998 100644 --- a/crates/core/src/operations/put.rs +++ b/crates/core/src/operations/put.rs @@ -20,6 +20,7 @@ use crate::{ message::{InnerMessage, NetMessage, NetMessageV1, Transaction}, node::{NetworkBridge, OpManager, PeerId}, ring::{Location, PeerKeyLocation}, + transport::ObservedAddr, }; pub(crate) struct PutOp { @@ -27,7 +28,7 @@ pub(crate) struct PutOp { state: Option, /// The address we received this operation's message from. /// Used for connection-based routing: responses are sent back to this address. - upstream_addr: Option, + upstream_addr: Option, } impl PutOp { @@ -93,7 +94,7 @@ impl Operation for PutOp { async fn load_or_init<'a>( op_manager: &'a OpManager, msg: &'a Self::Message, - source_addr: Option, + source_addr: Option, ) -> Result, OpError> { let tx = *msg.id(); tracing::debug!( @@ -158,7 +159,7 @@ impl Operation for PutOp { conn_manager: &'a mut NB, op_manager: &'a OpManager, input: &'a Self::Message, - _source_addr: Option, + _source_addr: Option, ) -> Pin> + Send + 'a>> { Box::pin(async move { let return_msg; @@ -941,7 +942,7 @@ fn build_op_result( id: Transaction, state: Option, msg: Option, - upstream_addr: Option, + upstream_addr: Option, ) -> Result { let output_op = state.map(|op| PutOp { id, @@ -950,7 +951,7 @@ fn build_op_result( }); Ok(OperationResult { return_msg: msg.map(NetMessage::from), - target_addr: None, + target_addr: upstream_addr.map(|a| a.socket_addr()), state: output_op.map(OpEnum::Put), }) } @@ -965,7 +966,7 @@ async fn try_to_broadcast( (broadcast_to, upstream): (Vec, PeerKeyLocation), key: ContractKey, (contract, new_value): (ContractContainer, WrappedState), - upstream_addr: Option, + upstream_addr: Option, ) -> Result<(Option, Option), OpError> { let new_state; let return_msg; diff --git a/crates/core/src/operations/subscribe.rs b/crates/core/src/operations/subscribe.rs index 8ddbb9bc1..ad00dad0d 100644 --- a/crates/core/src/operations/subscribe.rs +++ b/crates/core/src/operations/subscribe.rs @@ -11,6 +11,7 @@ use crate::{ message::{InnerMessage, NetMessage, Transaction}, node::{NetworkBridge, OpManager, PeerId}, ring::{Location, PeerKeyLocation, RingError}, + transport::ObservedAddr, }; use freenet_stdlib::{ client_api::{ContractResponse, ErrorKind, HostResponse}, @@ -301,7 +302,7 @@ pub(crate) struct SubscribeOp { state: Option, /// The address we received this operation's message from. /// Used for connection-based routing: responses are sent back to this address. 
- upstream_addr: Option, + upstream_addr: Option, } impl SubscribeOp { @@ -337,7 +338,7 @@ impl Operation for SubscribeOp { async fn load_or_init<'a>( op_manager: &'a OpManager, msg: &'a Self::Message, - source_addr: Option, + source_addr: Option, ) -> Result, OpError> { let id = *msg.id(); @@ -377,7 +378,7 @@ impl Operation for SubscribeOp { _conn_manager: &'a mut NB, op_manager: &'a OpManager, input: &'a Self::Message, - _source_addr: Option, + _source_addr: Option, ) -> Pin> + Send + 'a>> { Box::pin(async move { let return_msg; @@ -442,7 +443,7 @@ impl Operation for SubscribeOp { target: subscriber.clone(), subscribed: false, })), - target_addr: None, + target_addr: self.upstream_addr.map(|a| a.socket_addr()), state: None, }); } @@ -537,6 +538,7 @@ impl Operation for SubscribeOp { let ring_max_htl = op_manager.ring.max_hops_to_live.max(1); let htl = (*htl).min(ring_max_htl); let this_peer = op_manager.ring.connection_manager.own_location(); + let upstream_addr = self.upstream_addr; let return_not_subbed = || -> OperationResult { OperationResult { return_msg: Some(NetMessage::from(SubscribeMsg::ReturnSub { @@ -546,7 +548,7 @@ impl Operation for SubscribeOp { sender: this_peer.clone(), target: subscriber.clone(), })), - target_addr: None, + target_addr: upstream_addr.map(|a| a.socket_addr()), state: None, } }; @@ -904,7 +906,7 @@ fn build_op_result( id: Transaction, state: Option, msg: Option, - upstream_addr: Option, + upstream_addr: Option, ) -> Result { let output_op = state.map(|state| SubscribeOp { id, @@ -913,7 +915,7 @@ fn build_op_result( }); Ok(OperationResult { return_msg: msg.map(NetMessage::from), - target_addr: None, + target_addr: upstream_addr.map(|a| a.socket_addr()), state: output_op.map(OpEnum::Subscribe), }) } diff --git a/crates/core/src/operations/update.rs b/crates/core/src/operations/update.rs index f3f8f3196..c69acdd87 100644 --- a/crates/core/src/operations/update.rs +++ b/crates/core/src/operations/update.rs @@ -12,6 +12,7 @@ use crate::ring::{Location, PeerKeyLocation, RingError}; use crate::{ client_events::HostResult, node::{NetworkBridge, OpManager, PeerId}, + transport::ObservedAddr, }; pub(crate) struct UpdateOp { @@ -20,7 +21,7 @@ pub(crate) struct UpdateOp { stats: Option, /// The address we received this operation's message from. /// Used for connection-based routing: responses are sent back to this address. 
- upstream_addr: Option, + upstream_addr: Option, } impl UpdateOp { @@ -91,7 +92,7 @@ impl Operation for UpdateOp { async fn load_or_init<'a>( op_manager: &'a crate::node::OpManager, msg: &'a Self::Message, - source_addr: Option, + source_addr: Option, ) -> Result, OpError> { let tx = *msg.id(); match op_manager.pop(msg.id()) { @@ -132,7 +133,7 @@ impl Operation for UpdateOp { conn_manager: &'a mut NB, op_manager: &'a crate::node::OpManager, input: &'a Self::Message, - _source_addr: Option, + _source_addr: Option, ) -> std::pin::Pin< Box> + Send + 'a>, > { @@ -757,7 +758,7 @@ fn build_op_result( state: Option, return_msg: Option, stats: Option, - upstream_addr: Option, + upstream_addr: Option, ) -> Result { let output_op = state.map(|op| UpdateOp { id, @@ -768,7 +769,7 @@ fn build_op_result( let state = output_op.map(OpEnum::Update); Ok(OperationResult { return_msg: return_msg.map(NetMessage::from), - target_addr: None, + target_addr: upstream_addr.map(|a| a.socket_addr()), state, }) } diff --git a/crates/core/src/transport/mod.rs b/crates/core/src/transport/mod.rs index d833a27cf..e30deee56 100644 --- a/crates/core/src/transport/mod.rs +++ b/crates/core/src/transport/mod.rs @@ -33,6 +33,40 @@ pub(crate) use self::{ peer_connection::PeerConnection, }; +/// Address observed at the transport layer (from UDP packet source). +/// +/// This is the "ground truth" for NAT scenarios - it's the actual address we see +/// at the network layer, not what the peer claims in protocol messages. +/// +/// Using a newtype instead of raw `SocketAddr` makes the address semantics explicit +/// and prevents accidental confusion with advertised/claimed addresses. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub struct ObservedAddr(SocketAddr); + +impl ObservedAddr { + /// Create a new observed address from a socket address. + pub fn new(addr: SocketAddr) -> Self { + Self(addr) + } + + /// Get the underlying socket address. 
+ pub fn socket_addr(&self) -> SocketAddr { + self.0 + } +} + +impl std::fmt::Display for ObservedAddr { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + +impl From for ObservedAddr { + fn from(addr: SocketAddr) -> Self { + Self(addr) + } +} + #[derive(Debug, thiserror::Error)] pub(crate) enum TransportError { #[error("transport handler channel closed, socket likely closed")] From 02a3b5b252837085efa090772a041cad7edab4c3 Mon Sep 17 00:00:00 2001 From: Ian Clarke Date: Sat, 29 Nov 2025 10:45:34 -0600 Subject: [PATCH 17/32] ci: trigger workflow From 28c4f5ad09936dd784f8df236ce860ab4420d864 Mon Sep 17 00:00:00 2001 From: Ian Clarke Date: Sat, 29 Nov 2025 13:27:25 -0600 Subject: [PATCH 18/32] fix: resolve post-rebase compilation errors MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Use peer() method instead of peer field access - Use PeerKeyLocation::with_location() constructor - Simplify get_peer_by_addr to only check connections_by_location 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --- .../src/node/network_bridge/p2p_protoc.rs | 87 ++++++++-- crates/core/src/operations/connect.rs | 160 +++++++++++++++--- crates/core/src/ring/connection_manager.rs | 13 +- crates/core/src/test_utils.rs | 2 +- 4 files changed, 208 insertions(+), 54 deletions(-) diff --git a/crates/core/src/node/network_bridge/p2p_protoc.rs b/crates/core/src/node/network_bridge/p2p_protoc.rs index 35c1fcc19..82af379d0 100644 --- a/crates/core/src/node/network_bridge/p2p_protoc.rs +++ b/crates/core/src/node/network_bridge/p2p_protoc.rs @@ -467,7 +467,9 @@ impl P2pConnManager { ); match peer_connection { Some(peer_connection) => { - if let Err(e) = peer_connection.sender.send(Left(msg.clone())).await { + if let Err(e) = + peer_connection.sender.send(Left(msg.clone())).await + { tracing::error!( tx = %msg.id(), "Failed to send message to peer: {}", e @@ -674,10 +676,11 @@ impl P2pConnManager { ); // Clean up all active connections - let peers_to_cleanup: Vec<_> = - ctx.connections.iter().map(|(addr, entry)| { - (*addr, entry.pub_key.clone()) - }).collect(); + let peers_to_cleanup: Vec<_> = ctx + .connections + .iter() + .map(|(addr, entry)| (*addr, entry.pub_key.clone())) + .collect(); for (peer_addr, pub_key_opt) in peers_to_cleanup { tracing::debug!(%peer_addr, "Cleaning up active connection due to critical channel closure"); @@ -686,7 +689,16 @@ impl P2pConnManager { PeerId::new(peer_addr, pub_key) } else { // Use our own pub_key as placeholder if we don't know the peer's - PeerId::new(peer_addr, (*ctx.bridge.op_manager.ring.connection_manager.pub_key).clone()) + PeerId::new( + peer_addr, + (*ctx + .bridge + .op_manager + .ring + .connection_manager + .pub_key) + .clone(), + ) }; ctx.bridge .op_manager @@ -746,7 +758,16 @@ impl P2pConnManager { let peer = if let Some(ref pub_key) = entry.pub_key { PeerId::new(peer_addr, pub_key.clone()) } else { - PeerId::new(peer_addr, (*ctx.bridge.op_manager.ring.connection_manager.pub_key).clone()) + PeerId::new( + peer_addr, + (*ctx + .bridge + .op_manager + .ring + .connection_manager + .pub_key) + .clone(), + ) }; let pub_key_to_remove = entry.pub_key.clone(); @@ -845,13 +866,24 @@ impl P2pConnManager { } NodeEvent::QueryConnections { callback } => { // Reconstruct PeerIds from stored connections - let connections: Vec = ctx.connections.iter() + let connections: Vec = ctx + .connections + .iter() .map(|(addr, entry)| { if let Some(ref pub_key) = 
entry.pub_key { PeerId::new(*addr, pub_key.clone()) } else { // Use our own pub_key as placeholder if we don't know the peer's - PeerId::new(*addr, (*ctx.bridge.op_manager.ring.connection_manager.pub_key).clone()) + PeerId::new( + *addr, + (*ctx + .bridge + .op_manager + .ring + .connection_manager + .pub_key) + .clone(), + ) } }) .collect(); @@ -911,12 +943,23 @@ impl P2pConnManager { } // Reconstruct PeerIds from stored connections - let connections: Vec = ctx.connections.iter() + let connections: Vec = ctx + .connections + .iter() .map(|(addr, entry)| { if let Some(ref pub_key) = entry.pub_key { PeerId::new(*addr, pub_key.clone()) } else { - PeerId::new(*addr, (*ctx.bridge.op_manager.ring.connection_manager.pub_key).clone()) + PeerId::new( + *addr, + (*ctx + .bridge + .op_manager + .ring + .connection_manager + .pub_key) + .clone(), + ) } }) .collect(); @@ -1942,12 +1985,16 @@ impl P2pConnManager { } let (tx, rx) = mpsc::channel(10); tracing::debug!(self_peer = %self.bridge.op_manager.ring.connection_manager.pub_key, %peer_id, conn_map_size = self.connections.len(), "[CONN_TRACK] INSERT: OutboundConnectionSuccessful - adding to connections HashMap"); - self.connections.insert(peer_id.addr, ConnectionEntry { - sender: tx, - pub_key: Some(peer_id.pub_key.clone()), - }); + self.connections.insert( + peer_id.addr, + ConnectionEntry { + sender: tx, + pub_key: Some(peer_id.pub_key.clone()), + }, + ); // Add to reverse lookup - self.addr_by_pub_key.insert(peer_id.pub_key.clone(), peer_id.addr); + self.addr_by_pub_key + .insert(peer_id.pub_key.clone(), peer_id.addr); let Some(conn_events) = self.conn_event_tx.as_ref().cloned() else { anyhow::bail!("Connection event channel not initialized"); }; @@ -2125,7 +2172,8 @@ impl P2pConnManager { entry.pub_key = Some(new_peer_id.pub_key.clone()); } // Add new reverse lookup - self.addr_by_pub_key.insert(new_peer_id.pub_key.clone(), remote_addr); + self.addr_by_pub_key + .insert(new_peer_id.pub_key.clone(), remote_addr); } } } @@ -2154,7 +2202,10 @@ impl P2pConnManager { let peer = if let Some(ref pub_key) = entry.pub_key { PeerId::new(remote_addr, pub_key.clone()) } else { - PeerId::new(remote_addr, (*self.bridge.op_manager.ring.connection_manager.pub_key).clone()) + PeerId::new( + remote_addr, + (*self.bridge.op_manager.ring.connection_manager.pub_key).clone(), + ) }; // Remove from reverse lookup if let Some(pub_key) = entry.pub_key { diff --git a/crates/core/src/operations/connect.rs b/crates/core/src/operations/connect.rs index f7587772a..d654be30c 100644 --- a/crates/core/src/operations/connect.rs +++ b/crates/core/src/operations/connect.rs @@ -127,8 +127,9 @@ impl ConnectMsg { pub(crate) struct ConnectRequest { /// Joiner's advertised location (fallbacks to the joiner's socket address). pub desired_location: Location, - /// Joiner's identity as observed so far. - pub joiner: PeerKeyLocation, + /// Joiner's identity. NAT peers start as Unknown (just public key) until + /// a gateway observes their address and upgrades them to Known. + pub joiner: Joiner, /// Remaining hops before the request stops travelling. pub ttl: u8, /// Simple visited set to avoid trivial loops. @@ -144,6 +145,90 @@ pub(crate) struct ConnectResponse { pub acceptor: PeerKeyLocation, } +/// Represents a peer joining the network. 
+/// +/// NAT peers don't know their public address until a gateway observes it, +/// so we distinguish between: +/// - `Unknown`: Only have the public key (NAT peer before address discovery) +/// - `Known`: Have full PeerId with known address (gateway or after ObservedAddress) +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub(crate) enum Joiner { + /// Peer that doesn't yet know its public address (NAT peer before discovery). + Unknown(TransportPublicKey), + /// Peer with a known address (gateway, or NAT peer after ObservedAddress). + Known(PeerId), +} + +impl Joiner { + /// Returns the public key of the joiner. + #[allow(dead_code)] + pub fn pub_key(&self) -> &TransportPublicKey { + match self { + Joiner::Unknown(key) => key, + Joiner::Known(peer_id) => &peer_id.pub_key, + } + } + + /// Returns the PeerId if known, None if address is unknown. + pub fn peer_id(&self) -> Option<&PeerId> { + match self { + Joiner::Unknown(_) => None, + Joiner::Known(peer_id) => Some(peer_id), + } + } + + /// Returns true if this joiner has a known address. + #[allow(dead_code)] + pub fn has_known_address(&self) -> bool { + matches!(self, Joiner::Known(_)) + } + + /// Upgrades an Unknown joiner to Known once we observe their address. + pub fn with_observed_address(&self, addr: SocketAddr) -> Self { + match self { + Joiner::Unknown(key) => Joiner::Known(PeerId::new(addr, key.clone())), + Joiner::Known(peer_id) => { + // Avoid allocation if address hasn't changed + if peer_id.addr == addr { + self.clone() + } else { + Joiner::Known(PeerId::new(addr, peer_id.pub_key.clone())) + } + } + } + } + + /// Converts to a PeerKeyLocation if we have a known address. + /// Returns None if address is unknown. + pub fn to_peer_key_location(&self) -> Option { + match self { + Joiner::Unknown(_) => None, + Joiner::Known(peer_id) => Some(PeerKeyLocation::with_location( + peer_id.pub_key.clone(), + peer_id.addr, + Location::from_address(&peer_id.addr), + )), + } + } + + /// Returns the location if we have a known address. + pub fn location(&self) -> Option { + match self { + Joiner::Unknown(_) => None, + Joiner::Known(peer_id) => Some(Location::from_address(&peer_id.addr)), + } + } +} + +impl fmt::Display for Joiner { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Joiner::Unknown(key) => write!(f, "Unknown({})", key), + Joiner::Known(peer_id) => write!(f, "Known({})", peer_id), + } + } +} + /// New minimal state machine the joiner tracks. #[derive(Debug, Clone)] pub(crate) enum ConnectState { @@ -179,7 +264,8 @@ pub(crate) trait RelayContext { fn self_location(&self) -> &PeerKeyLocation; /// Determine whether we should accept the joiner immediately. - fn should_accept(&self, joiner: &PeerKeyLocation) -> bool; + /// Takes a Joiner which may or may not have a known address yet. + fn should_accept(&self, joiner: &Joiner) -> bool; /// Choose the next hop for the request, avoiding peers already visited. fn select_next_hop( @@ -279,23 +365,23 @@ impl RelayState { push_unique_peer(&mut self.request.visited, ctx.self_location().clone()); if let Some(joiner_addr) = self.request.observed_addr { - // Always overwrite with observed socket rather than checking routability. If the - // observed socket is loopback, this guard is skipped only in local/unit tests where - // peers share 127.0.0.1, so keep a one-shot overwrite and avoid early returns. + // Upgrade the joiner to Known with the observed address. + // This is critical for NAT peers who start as Unknown. 
if !self.observed_sent { - self.request.joiner.peer.addr = joiner_addr; - if self.request.joiner.location.is_none() { - self.request.joiner.location = Some(Location::from_address(&joiner_addr)); - } + self.request.joiner = self.request.joiner.with_observed_address(joiner_addr); self.observed_sent = true; - actions.observed_address = Some((self.request.joiner.clone(), joiner_addr)); + // Now that we have a known address, we can create a PeerKeyLocation + if let Some(joiner_pkl) = self.request.joiner.to_peer_key_location() { + actions.observed_address = Some((joiner_pkl, joiner_addr)); + } } } if !self.accepted_locally && ctx.should_accept(&self.request.joiner) { self.accepted_locally = true; let acceptor = ctx.self_location().clone(); - let dist = ring_distance(acceptor.location, self.request.joiner.location); + let joiner_location = self.request.joiner.location(); + let dist = ring_distance(acceptor.location, joiner_location); actions.accept_response = Some(ConnectResponse { acceptor: acceptor.clone(), }); @@ -388,14 +474,18 @@ impl RelayContext for RelayEnv<'_> { &self.self_location } - fn should_accept(&self, joiner: &PeerKeyLocation) -> bool { + fn should_accept(&self, joiner: &Joiner) -> bool { + // We can only accept joiners with known addresses + let Some(peer_id) = joiner.peer_id() else { + return false; + }; let location = joiner - .location - .unwrap_or_else(|| Location::from_address(&joiner.peer.addr)); + .location() + .unwrap_or_else(|| Location::from_address(&peer_id.addr)); self.op_manager .ring .connection_manager - .should_accept(location, &joiner.peer) + .should_accept(location, peer_id) } fn select_next_hop( @@ -636,12 +726,20 @@ impl ConnectOp { ttl: u8, target_connections: usize, connect_forward_estimator: Arc>, + is_gateway: bool, ) -> (Transaction, Self, ConnectMsg) { let mut visited = vec![own.clone()]; push_unique_peer(&mut visited, target.clone()); + // Gateways know their address, NAT peers don't until observed + let joiner = if is_gateway { + Joiner::Known(own.peer()) + } else { + // NAT peer: we only know our public key, not our external address + Joiner::Unknown(own.pub_key.clone()) + }; let request = ConnectRequest { desired_location, - joiner: own.clone(), + joiner, ttl, visited, observed_addr: None, @@ -953,6 +1051,14 @@ impl Operation for ConnectOp { } ConnectMsg::ObservedAddress { address, .. } => { self.handle_observed_address(*address, Instant::now()); + // Update our peer address now that we know our external address. + // This is critical for peers behind NAT who start with a placeholder + // address (127.0.0.1) and need to update it when a gateway observes + // their actual public address. 
+ op_manager + .ring + .connection_manager + .update_peer_address(*address); Ok(store_operation_state(&mut self)) } } @@ -1063,6 +1169,7 @@ pub(crate) async fn join_ring_request( .min(u8::MAX as usize) as u8; let target_connections = op_manager.ring.connection_manager.min_connections; + let is_gateway = op_manager.ring.connection_manager.is_gateway(); let (tx, mut op, msg) = ConnectOp::initiate_join_request( own.clone(), gateway.clone(), @@ -1070,6 +1177,7 @@ pub(crate) async fn join_ring_request( ttl, target_connections, op_manager.connect_forward_estimator.clone(), + is_gateway, ); op.gateway = Some(Box::new(gateway.clone())); @@ -1238,7 +1346,7 @@ mod tests { &self.self_loc } - fn should_accept(&self, _joiner: &PeerKeyLocation) -> bool { + fn should_accept(&self, _joiner: &Joiner) -> bool { self.accept } @@ -1259,6 +1367,11 @@ mod tests { PeerKeyLocation::with_location(keypair.public().clone(), addr, Location::random()) } + /// Helper to create a Joiner::Known from a PeerKeyLocation + fn make_joiner(pkl: &PeerKeyLocation) -> Joiner { + Joiner::Known(pkl.peer()) + } + #[test] fn forward_estimator_handles_missing_location() { let mut estimator = ConnectForwardEstimator::new(); @@ -1299,7 +1412,7 @@ mod tests { upstream: joiner.clone(), request: ConnectRequest { desired_location: Location::random(), - joiner: joiner.clone(), + joiner: make_joiner(&joiner), ttl: 3, visited: vec![], observed_addr: Some(joiner.addr()), @@ -1334,7 +1447,7 @@ mod tests { upstream: joiner.clone(), request: ConnectRequest { desired_location: Location::random(), - joiner: joiner.clone(), + joiner: make_joiner(&joiner), ttl: 2, visited: vec![], observed_addr: Some(joiner.addr()), @@ -1375,7 +1488,7 @@ mod tests { upstream: joiner.clone(), request: ConnectRequest { desired_location: Location::random(), - joiner: joiner.clone(), + joiner: make_joiner(&joiner), ttl: 3, visited: vec![], observed_addr: Some(observed_addr), @@ -1438,6 +1551,7 @@ mod tests { ttl, 2, Arc::new(RwLock::new(ConnectForwardEstimator::new())), + true, // is_gateway for test ); match msg { @@ -1470,7 +1584,7 @@ mod tests { let request = ConnectRequest { desired_location: Location::random(), - joiner: joiner.clone(), + joiner: make_joiner(&joiner), ttl: 3, visited: vec![joiner.clone()], observed_addr: Some(joiner.addr()), @@ -1551,7 +1665,7 @@ mod tests { upstream: joiner.clone(), request: ConnectRequest { desired_location: Location::random(), - joiner: joiner.clone(), + joiner: make_joiner(&joiner), ttl: 3, visited: vec![], observed_addr: Some(observed_public_addr), diff --git a/crates/core/src/ring/connection_manager.rs b/crates/core/src/ring/connection_manager.rs index 8713ec34d..6f11395c3 100644 --- a/crates/core/src/ring/connection_manager.rs +++ b/crates/core/src/ring/connection_manager.rs @@ -300,7 +300,7 @@ impl ConnectionManager { self.peer_key.lock().clone() } - /// Look up a PeerId by socket address from connections_by_location or transient connections. + /// Look up a PeerId by socket address from connections_by_location. 
pub fn get_peer_by_addr(&self, addr: SocketAddr) -> Option { // Check connections by location let connections = self.connections_by_location.read(); @@ -311,17 +311,6 @@ impl ConnectionManager { } } } - drop(connections); - - // Check transient connections - if let Some((peer, _)) = self - .transient_connections - .iter() - .find(|e| e.key().addr == addr) - .map(|e| (e.key().clone(), e.value().clone())) - { - return Some(peer); - } None } diff --git a/crates/core/src/test_utils.rs b/crates/core/src/test_utils.rs index 5448bc44c..23dee3d06 100644 --- a/crates/core/src/test_utils.rs +++ b/crates/core/src/test_utils.rs @@ -1596,7 +1596,7 @@ async fn count_connection_events(event_log_path: &Path) -> anyhow::Result for event in &events { if let EventKind::Connect(ConnectEvent::Connected { connected, .. }) = &event.kind { // Use the connected peer's ID as the unique identifier - connected_peers.insert(connected.peer.to_string()); + connected_peers.insert(connected.peer().to_string()); } } From 7bd31ccf72c21deb08d1a2e4417c655e5064ff01 Mon Sep 17 00:00:00 2001 From: Ian Clarke Date: Sat, 29 Nov 2025 16:36:54 -0600 Subject: [PATCH 19/32] fix: use socket_addr() and set_addr() for proper address mutation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Copilot correctly identified that sender_mut.peer().addr = remote_addr did nothing because peer() returns a PeerId by value, not a reference. Use socket_addr() to safely check the address and set_addr() to mutate. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --- crates/core/src/node/network_bridge/p2p_protoc.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/crates/core/src/node/network_bridge/p2p_protoc.rs b/crates/core/src/node/network_bridge/p2p_protoc.rs index 82af379d0..7fb6e26e0 100644 --- a/crates/core/src/node/network_bridge/p2p_protoc.rs +++ b/crates/core/src/node/network_bridge/p2p_protoc.rs @@ -2137,8 +2137,11 @@ impl P2pConnManager { if let Some(sender_mut) = extract_sender_from_message_mut(&mut inbound.msg) { - if sender_mut.peer().addr.ip().is_unspecified() { - sender_mut.peer().addr = remote_addr; + if sender_mut + .socket_addr() + .is_some_and(|a| a.ip().is_unspecified()) + { + sender_mut.set_addr(remote_addr); } } } From 3a9254e23f80e86111bc44d96d38ded11c8ea885 Mon Sep 17 00:00:00 2001 From: Ian Clarke Date: Sat, 29 Nov 2025 17:32:34 -0600 Subject: [PATCH 20/32] fix: route connect messages through upstream for NAT relay MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ObservedAddress and ConnectResponse messages must route through the upstream connection (source_addr) rather than directly to target addresses, since relay peers may not have direct connections to joiners. This mirrors the fix from PR #2171 that addressed the same NAT routing issue for different message types. 
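As a self-contained illustration of that routing rule (the function and variable
names below are stand-ins, not the patch's own API — the patch itself threads the
upstream source_addr through the connect operation), the idea is simply: reply
over the hop the request arrived on whenever one is recorded, and fall back to the
advertised target only when there is no upstream, e.g. a gateway talking to the
joiner directly:

    use std::net::{IpAddr, Ipv4Addr, SocketAddr};

    /// Illustrative only: choose where a relay sends a reply.
    fn route_reply(upstream: Option<SocketAddr>, target: SocketAddr) -> SocketAddr {
        // A relay often has no direct connection to a NAT'd joiner, so a reply
        // sent straight to the joiner's advertised address would never arrive.
        upstream.unwrap_or(target)
    }

    fn main() {
        let joiner = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 10)), 4000);
        let upstream = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(198, 51, 100, 7)), 5000);
        // With an upstream hop recorded, the reply retraces the request's path.
        assert_eq!(route_reply(Some(upstream), joiner), upstream);
        // Without one (direct connection), replying to the target is fine.
        assert_eq!(route_reply(None, joiner), joiner);
    }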
🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --- crates/core/src/operations/connect.rs | 30 ++++++++++++++++++++------- 1 file changed, 22 insertions(+), 8 deletions(-) diff --git a/crates/core/src/operations/connect.rs b/crates/core/src/operations/connect.rs index d654be30c..7fb6518a1 100644 --- a/crates/core/src/operations/connect.rs +++ b/crates/core/src/operations/connect.rs @@ -886,7 +886,7 @@ impl Operation for ConnectOp { network_bridge: &'a mut NB, op_manager: &'a OpManager, msg: &'a Self::Message, - _source_addr: Option, + source_addr: Option, ) -> std::pin::Pin< Box> + Send + 'a>, > { @@ -907,9 +907,16 @@ impl Operation for ConnectOp { target: target.clone(), address, }; - network_bridge - .send(target.addr(), NetMessage::V1(NetMessageV1::Connect(msg))) - .await?; + // Route through upstream (where the request came from) since we may + // not have a direct connection to the target + if let Some(upstream) = &source_addr { + network_bridge + .send( + upstream.socket_addr(), + NetMessage::V1(NetMessageV1::Connect(msg)), + ) + .await?; + } } if let Some(peer) = actions.expect_connection_from { @@ -947,10 +954,17 @@ impl Operation for ConnectOp { target: response_target, payload: response, }; - return Ok(store_operation_state_with_msg( - &mut self, - Some(response_msg), - )); + // Route the response through upstream (where the request came from) + // since we may not have a direct connection to the joiner + if let Some(upstream) = &source_addr { + network_bridge + .send( + upstream.socket_addr(), + NetMessage::V1(NetMessageV1::Connect(response_msg)), + ) + .await?; + } + return Ok(store_operation_state(&mut self)); } Ok(store_operation_state(&mut self)) From f9b81b10e688a436dcbf36bff2ad554558d3f833 Mon Sep 17 00:00:00 2001 From: Ian Clarke Date: Sat, 29 Nov 2025 18:13:12 -0600 Subject: [PATCH 21/32] fix: allow Get::ReturnGet messages to be requeued locally MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The Get operation synthesizes ReturnGet messages back to itself for retry/fallback logic. With connection-based routing (upstream_addr), these local messages have target_addr: None. The previous debug_assert only allowed Update::Broadcasting for local re-queuing, causing a panic. Expanded the assertion to also allow Get::ReturnGet messages. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --- crates/core/src/operations/mod.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/crates/core/src/operations/mod.rs b/crates/core/src/operations/mod.rs index b72e252f0..6462c5aeb 100644 --- a/crates/core/src/operations/mod.rs +++ b/crates/core/src/operations/mod.rs @@ -177,9 +177,11 @@ where msg, NetMessage::V1(NetMessageV1::Update( crate::operations::update::UpdateMsg::Broadcasting { .. } + )) | NetMessage::V1(NetMessageV1::Get( + crate::operations::get::GetMsg::ReturnGet { .. 
} )) ), - "Only Update::Broadcasting messages should be re-queued locally" + "Only Update::Broadcasting and Get::ReturnGet messages should be re-queued locally" ); op_manager.notify_op_change(msg, updated_state).await?; return Err(OpError::StatePushed); From 0131c7b444bb2ca79711687e3580279b7e0ca505 Mon Sep 17 00:00:00 2001 From: Ian Clarke Date: Sat, 29 Nov 2025 20:36:02 -0600 Subject: [PATCH 22/32] fix: run connection monitoring concurrently with event listener MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The aggressive_initial_connections phase was blocking before the event listener started, preventing any connections from being established. The fix spawns connection monitoring as a background task that runs concurrently with run_event_listener, which is what binds the UDP socket and processes incoming messages. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --- crates/core/src/node/p2p_impl.rs | 117 ++++++------------------------- 1 file changed, 23 insertions(+), 94 deletions(-) diff --git a/crates/core/src/node/p2p_impl.rs b/crates/core/src/node/p2p_impl.rs index 3fcaa39c5..82a6f317a 100644 --- a/crates/core/src/node/p2p_impl.rs +++ b/crates/core/src/node/p2p_impl.rs @@ -1,4 +1,4 @@ -use std::{collections::HashSet, convert::Infallible, sync::Arc, time::Duration}; +use std::{convert::Infallible, sync::Arc, time::Duration}; use futures::{future::BoxFuture, FutureExt}; use tokio::task::JoinHandle; @@ -21,12 +21,9 @@ use crate::{ self, ContractHandler, ContractHandlerChannel, ExecutorToEventLoopChannel, NetworkEventListenerHalve, WaitingResolution, }, - message::{NetMessage, NetMessageV1, NodeEvent}, + message::NodeEvent, node::NodeConfig, - operations::{ - connect::{self, ConnectOp}, - OpEnum, - }, + operations::connect::{self}, }; use super::OpManager; @@ -49,9 +46,10 @@ pub(crate) struct NodeP2P { } impl NodeP2P { - /// Aggressively establish connections during startup to avoid on-demand delays - async fn aggressive_initial_connections(&self) { - let min_connections = self.op_manager.ring.connection_manager.min_connections; + /// Aggressively monitor connections during startup. + /// This is a static version that can be spawned as a background task. 
+ async fn aggressive_initial_connections_static(op_manager: &OpManager) { + let min_connections = op_manager.ring.connection_manager.min_connections; tracing::info!( "Starting aggressive connection acquisition phase (target: {} connections)", @@ -62,15 +60,13 @@ impl NodeP2P { // to avoid the 10+ second delays on first GET operations let start = std::time::Instant::now(); let max_duration = Duration::from_secs(10); - let mut last_connection_count = 0; - let mut stable_rounds = 0; while start.elapsed() < max_duration { // Cooperative yielding for CI environments with limited CPU cores // Research shows CI (2 cores) needs explicit yields to prevent task starvation tokio::task::yield_now().await; - let current_connections = self.op_manager.ring.open_connections(); + let current_connections = op_manager.ring.open_connections(); // If we've reached our target, we're done if current_connections >= min_connections { @@ -82,33 +78,6 @@ impl NodeP2P { break; } - // If connection count is stable for 3 rounds, actively trigger more connections - if current_connections == last_connection_count { - stable_rounds += 1; - if stable_rounds >= 3 && current_connections > 0 { - tracing::info!( - "Connection count stable at {}, triggering active peer discovery", - current_connections - ); - - // Trigger the connection maintenance task to actively look for more peers - // In small networks, we want to be more aggressive - for _ in 0..3 { - // Yield before each connection attempt to prevent blocking other tasks - tokio::task::yield_now().await; - - if let Err(e) = self.trigger_connection_maintenance().await { - tracing::warn!("Failed to trigger connection maintenance: {}", e); - } - tokio::time::sleep(Duration::from_millis(100)).await; - } - stable_rounds = 0; - } - } else { - stable_rounds = 0; - last_connection_count = current_connections; - } - tracing::debug!( "Current connections: {}/{}, waiting for more peers (elapsed: {}s)", current_connections, @@ -125,7 +94,7 @@ impl NodeP2P { tokio::time::sleep(sleep_duration).await; } - let final_connections = self.op_manager.ring.open_connections(); + let final_connections = op_manager.ring.open_connections(); tracing::info!( "Aggressive connection phase complete. 
Final connections: {}/{} (took {}s)", final_connections, @@ -134,56 +103,6 @@ impl NodeP2P { ); } - /// Trigger the connection maintenance task to actively look for more peers - async fn trigger_connection_maintenance(&self) -> anyhow::Result<()> { - let ideal_location = Location::random(); - - // Find a connected peer to query - let query_target = { - let router = self.op_manager.ring.router.read(); - self.op_manager.ring.connection_manager.routing( - ideal_location, - None, - &HashSet::::new(), - &router, - ) - }; - - if let Some(query_target) = query_target { - let joiner = self.op_manager.ring.connection_manager.own_location(); - let ttl = self - .op_manager - .ring - .max_hops_to_live - .max(1) - .min(u8::MAX as usize) as u8; - let target_connections = self.op_manager.ring.connection_manager.min_connections; - - let (tx, op, msg) = ConnectOp::initiate_join_request( - joiner, - query_target.clone(), - ideal_location, - ttl, - target_connections, - self.op_manager.connect_forward_estimator.clone(), - ); - - tracing::debug!( - %tx, - query_peer = %query_target.peer(), - %ideal_location, - "Triggering connection maintenance connect request" - ); - self.op_manager - .notify_op_change( - NetMessage::V1(NetMessageV1::Connect(msg)), - OpEnum::Connect(Box::new(op)), - ) - .await?; - } - - Ok(()) - } pub(super) async fn run_node(mut self) -> anyhow::Result { if self.should_try_connect { let join_handle = connect::initial_join_procedure( @@ -192,12 +111,22 @@ impl NodeP2P { ) .await?; self.initial_join_task = Some(join_handle); - - // After connecting to gateways, aggressively try to reach min_connections - // This is important for fast startup and avoiding on-demand connection delays - self.aggressive_initial_connections().await; } + // Spawn aggressive connection acquisition as a background task. + // This MUST run concurrently with the event listener, not before it, + // because the event listener is what binds the UDP socket and processes + // incoming messages - without it running, no connections can be established. + let op_manager_for_connections = self.op_manager.clone(); + let should_try_connect = self.should_try_connect; + tokio::spawn(async move { + if should_try_connect { + // Small delay to ensure event listener has started + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + Self::aggressive_initial_connections_static(&op_manager_for_connections).await; + } + }); + let f = self.conn_manager.run_event_listener( self.op_manager.clone(), self.client_wait_for_transaction, From 58dd1a34255918b4cc2465688f9e7b5da1be027d Mon Sep 17 00:00:00 2001 From: Ian Clarke Date: Sat, 29 Nov 2025 21:19:54 -0600 Subject: [PATCH 23/32] fix: only return connected peers from k_closest_potentially_caching MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The fallback to known_locations was returning peers that we're not currently connected to. Attempting to route messages to those peers requires establishing a new connection, which fails in NAT scenarios without coordination from both sides. Now k_closest_potentially_caching only returns peers we have active connections to, ensuring routing attempts will succeed. 
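The shape of that change, sketched standalone (peer ids reduced to integers and the
function name invented for illustration; the actual patch just drops the
known_locations fallback inside k_closest_potentially_caching):

    use std::collections::HashSet;

    /// Illustrative only: keep routing candidates we hold live connections to,
    /// instead of padding the list with peers we merely know about. Unconnected
    /// peers would need a fresh (possibly NAT-blocked) connection before any
    /// message could be delivered.
    fn connected_candidates(known: &[u64], connected: &HashSet<u64>, k: usize) -> Vec<u64> {
        known
            .iter()
            .copied()
            .filter(|peer| connected.contains(peer))
            .take(k)
            .collect()
    }

    fn main() {
        let known = vec![1, 2, 3, 4];
        let connected: HashSet<u64> = [2, 4].into_iter().collect();
        // Returning two reachable peers beats returning four that include
        // unreachable ones.
        assert_eq!(connected_candidates(&known, &connected, 3), vec![2, 4]);
    }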
🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --- crates/core/src/ring/connection_manager.rs | 4 ---- crates/core/src/ring/mod.rs | 21 +++++---------------- 2 files changed, 5 insertions(+), 20 deletions(-) diff --git a/crates/core/src/ring/connection_manager.rs b/crates/core/src/ring/connection_manager.rs index 6f11395c3..b2188e0d0 100644 --- a/crates/core/src/ring/connection_manager.rs +++ b/crates/core/src/ring/connection_manager.rs @@ -518,10 +518,6 @@ impl ConnectionManager { self.connections_by_location.read().clone() } - pub(super) fn get_known_locations(&self) -> BTreeMap { - self.location_for_peer.read().clone() - } - /// Route an op to the most optimal target. pub fn routing( &self, diff --git a/crates/core/src/ring/mod.rs b/crates/core/src/ring/mod.rs index 411a4d4f5..14a32fed5 100644 --- a/crates/core/src/ring/mod.rs +++ b/crates/core/src/ring/mod.rs @@ -307,22 +307,11 @@ impl Ring { } } - if candidates.len() < k { - let known_locations = self.connection_manager.get_known_locations(); - for (peer, location) in known_locations { - if skip_list.has_element(peer.clone()) || !seen.insert(peer.clone()) { - continue; - } - candidates.push(PeerKeyLocation::with_location( - peer.pub_key.clone(), - peer.addr, - location, - )); - if candidates.len() >= k { - break; - } - } - } + // Note: We intentionally do NOT fall back to known_locations here. + // known_locations may contain peers we're not currently connected to, + // and attempting to route to them would require establishing a new connection + // which may fail (especially in NAT scenarios without coordination). + // It's better to return fewer candidates than unreachable ones. router .select_k_best_peers(candidates.iter(), target_location, k) From ded8e4cfb462c859b52ed13b5ac73ee7d78d8e47 Mon Sep 17 00:00:00 2001 From: Ian Clarke Date: Sat, 29 Nov 2025 22:19:57 -0600 Subject: [PATCH 24/32] fix: use pub_key() and socket_addr() in connect operation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Use pub_key() instead of peer() in acceptance tracing to avoid panics when the acceptor's PeerAddr is Unknown (NAT scenario) - Call socket_addr() on ObservedAddr when sending to network_bridge 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --- crates/core/src/operations/connect.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/crates/core/src/operations/connect.rs b/crates/core/src/operations/connect.rs index 7fb6518a1..d339a0222 100644 --- a/crates/core/src/operations/connect.rs +++ b/crates/core/src/operations/connect.rs @@ -391,8 +391,8 @@ impl RelayState { // Use the joiner with updated observed address for response routing actions.response_target = Some(joiner_pkl.clone()); tracing::info!( - acceptor_peer = %acceptor.peer(), - joiner_peer = %joiner_pkl.peer(), + acceptor_pub_key = %acceptor.pub_key(), + joiner_pub_key = %joiner_pkl.pub_key(), acceptor_loc = ?acceptor.location, joiner_loc = ?joiner_pkl.location, ring_distance = ?dist, @@ -909,7 +909,7 @@ impl Operation for ConnectOp { }; // Route through upstream (where the request came from) since we may // not have a direct connection to the target - if let Some(upstream) = &source_addr { + if let Some(upstream) = source_addr { network_bridge .send( upstream.socket_addr(), @@ -956,7 +956,7 @@ impl Operation for ConnectOp { }; // Route the response through upstream (where the request came from) // since we may not have a direct connection to 
the joiner - if let Some(upstream) = &source_addr { + if let Some(upstream) = source_addr { network_bridge .send( upstream.socket_addr(), From d91ca3a1014618e6acfb4e9cb36310df95802afd Mon Sep 17 00:00:00 2001 From: Ian Clarke Date: Sun, 30 Nov 2025 17:08:01 -0600 Subject: [PATCH 25/32] fix: remove reverted update_peer_address call and add missing is_gateway param MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The update_peer_address method was added by PR #2158, which was reverted. Remove the call and add the missing is_gateway parameter to initiate_join_request. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --- crates/core/src/operations/connect.rs | 8 -------- crates/core/src/ring/mod.rs | 1 + 2 files changed, 1 insertion(+), 8 deletions(-) diff --git a/crates/core/src/operations/connect.rs b/crates/core/src/operations/connect.rs index d339a0222..f63818e0d 100644 --- a/crates/core/src/operations/connect.rs +++ b/crates/core/src/operations/connect.rs @@ -1065,14 +1065,6 @@ impl Operation for ConnectOp { } ConnectMsg::ObservedAddress { address, .. } => { self.handle_observed_address(*address, Instant::now()); - // Update our peer address now that we know our external address. - // This is critical for peers behind NAT who start with a placeholder - // address (127.0.0.1) and need to update it when a gateway observes - // their actual public address. - op_manager - .ring - .connection_manager - .update_peer_address(*address); Ok(store_operation_state(&mut self)) } } diff --git a/crates/core/src/ring/mod.rs b/crates/core/src/ring/mod.rs index 14a32fed5..62061ea7f 100644 --- a/crates/core/src/ring/mod.rs +++ b/crates/core/src/ring/mod.rs @@ -653,6 +653,7 @@ impl Ring { ttl, target_connections, op_manager.connect_forward_estimator.clone(), + false, // is_gateway: connection_maintenance is for peers, not gateways ); live_tx_tracker.add_transaction(query_target.addr(), tx); From 85001cc5e7f2a17c531f55d793371fc725b55a1e Mon Sep 17 00:00:00 2001 From: Ian Clarke Date: Sun, 30 Nov 2025 17:42:56 -0600 Subject: [PATCH 26/32] ci: retrigger CI From 766e53d697f39a6799e44f773e28763a2731e193 Mon Sep 17 00:00:00 2001 From: Ian Clarke Date: Sun, 30 Nov 2025 17:53:12 -0600 Subject: [PATCH 27/32] fix: use unique temp dir in test_serde_config_args MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Prevents "Permission denied" errors on self-hosted runners where /tmp/freenet may have stale permissions from previous test runs. 
🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --- crates/core/src/config/mod.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/crates/core/src/config/mod.rs b/crates/core/src/config/mod.rs index 050b526ad..ff5f50070 100644 --- a/crates/core/src/config/mod.rs +++ b/crates/core/src/config/mod.rs @@ -1257,8 +1257,11 @@ mod tests { #[tokio::test] async fn test_serde_config_args() { + // Use a unique ID to avoid conflicts with other tests or stale /tmp/freenet directories + let unique_id = format!("test-serde-{}", std::process::id()); let args = ConfigArgs { mode: Some(OperationMode::Local), + id: Some(unique_id), ..Default::default() }; let cfg = args.build().await.unwrap(); From 8e1b4b7e094eec090ecbadb024d49de710d92f95 Mon Sep 17 00:00:00 2001 From: Ian Clarke Date: Sun, 30 Nov 2025 20:00:39 -0600 Subject: [PATCH 28/32] perf(test): cache keypair in PeerKeyLocation::random() to speed up tests The refactoring removed the thread-local keypair cache that was previously in PeerId::random(). This caused test_topology to take ~17 minutes as it generates 5,000 PeerKeyLocations, each requiring expensive crypto key generation. Restore the caching pattern to generate only one keypair per thread. [AI-assisted - Claude] --- crates/core/src/ring/peer_key_location.rs | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/crates/core/src/ring/peer_key_location.rs b/crates/core/src/ring/peer_key_location.rs index e04781647..30df94f39 100644 --- a/crates/core/src/ring/peer_key_location.rs +++ b/crates/core/src/ring/peer_key_location.rs @@ -174,15 +174,31 @@ impl PeerKeyLocation { #[cfg(test)] pub fn random() -> Self { - use crate::transport::TransportKeypair; + use crate::transport::{TransportKeypair, TransportPublicKey}; use rand::Rng; + use std::cell::RefCell; + + // Cache the keypair per thread to avoid expensive key generation in tests + thread_local! { + static CACHED_KEY: RefCell> = const { RefCell::new(None) }; + } let mut addr_bytes = [0u8; 4]; rand::rng().fill(&mut addr_bytes[..]); let port = crate::util::get_free_port().unwrap(); let addr = SocketAddr::from((addr_bytes, port)); - let pub_key = TransportKeypair::new().public().clone(); + let pub_key = CACHED_KEY.with(|cached| { + let mut cached = cached.borrow_mut(); + match &*cached { + Some(k) => k.clone(), + None => { + let key = TransportKeypair::new().public().clone(); + cached.replace(key.clone()); + key + } + } + }); PeerKeyLocation { pub_key, From 848656fe114acfb68640ecb2636bd11bdf30f977 Mon Sep 17 00:00:00 2001 From: Ian Clarke Date: Sun, 30 Nov 2025 20:16:57 -0600 Subject: [PATCH 29/32] ci: retry CI [AI-assisted - Claude] From 37bee614712b4da20b3fc491f01c3204cab034fc Mon Sep 17 00:00:00 2001 From: Ian Clarke Date: Sun, 30 Nov 2025 20:28:12 -0600 Subject: [PATCH 30/32] fix: eliminate port reservation race condition in test infrastructure MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Problem Tests were intermittently failing with "Address already in use (os error 98)" because the port reservation mechanism had a race condition: 1. reserve_local_port() bound a TCP listener, got a port, then immediately dropped it 2. The port was released back to the OS 3. Between release and when the test node actually bound to the port, another process could claim it Additionally, the reservation used TCP but nodes bind UDP, so a port could be free for TCP but already in use for UDP. 
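The window can be seen in a small standalone sketch (function names here are
invented for illustration; the patch's real API is reserve_local_port /
release_local_port backed by the RESERVED_SOCKETS map):

    use std::net::UdpSocket;

    /// The racy pattern: bind, read the port, drop the socket. The OS may hand
    /// the same port to another process before the test node binds to it.
    fn racy_reserve() -> std::io::Result<u16> {
        let sock = UdpSocket::bind(("127.0.0.1", 0))?;
        let port = sock.local_addr()?.port();
        Ok(port) // `sock` drops here; the port is immediately free again
    }

    /// Holding the socket keeps the port reserved until the caller explicitly
    /// drops it, just before the node binds.
    fn held_reserve() -> std::io::Result<(UdpSocket, u16)> {
        let sock = UdpSocket::bind(("127.0.0.1", 0))?;
        let port = sock.local_addr()?.port();
        Ok((sock, port))
    }

    fn main() -> std::io::Result<()> {
        let _racy_port = racy_reserve()?;
        let (_guard, held_port) = held_reserve()?;
        println!("port {held_port} stays reserved until _guard is dropped");
        Ok(())
    }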
## Solution - Reserve both UDP (primary for Freenet nodes) and TCP (for WebSocket API) ports - Keep the sockets alive in a global RESERVED_SOCKETS map until explicitly released - Release ports just before node startup in the test macro to minimize the race window - Store port variables at outer scope so they're available for both config creation and later release The key insight is that we need to hold the port reservation until the moment before the node binds, then release it to allow the node to bind to that exact port. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --- crates/core/src/test_utils.rs | 37 +++++++++++++++++++++------- crates/freenet-macros/src/codegen.rs | 30 +++++++++++++++------- 2 files changed, 49 insertions(+), 18 deletions(-) diff --git a/crates/core/src/test_utils.rs b/crates/core/src/test_utils.rs index 23dee3d06..89b549acb 100644 --- a/crates/core/src/test_utils.rs +++ b/crates/core/src/test_utils.rs @@ -8,7 +8,7 @@ use std::{ }; use clap::ValueEnum; -use dashmap::DashSet; +use dashmap::{DashMap, DashSet}; use freenet_stdlib::{ client_api::{ClientRequest, ContractRequest, WebApi}, prelude::*, @@ -815,25 +815,40 @@ mod test { // Port reservation utilities for integration tests static RESERVED_PORTS: Lazy> = Lazy::new(DashSet::new); +/// Holds sockets to keep ports reserved until explicitly released. +/// The tuple stores (UdpSocket, TcpListener) for each port. +static RESERVED_SOCKETS: Lazy> = + Lazy::new(DashMap::new); /// Reserve a unique localhost TCP port for tests. /// -/// Ports are allocated by binding to an ephemeral listener to ensure the port -/// is currently free, then tracked in a global set so concurrent tests do not -/// reuse the same value. Ports remain reserved until released via -/// [`release_local_port`]. +/// Ports are allocated by binding to both UDP and TCP on an ephemeral port to +/// ensure the port is currently free for both protocols, then tracked in a +/// global set so concurrent tests do not reuse the same value. Ports remain +/// reserved until released via [`release_local_port`]. +/// +/// The sockets are kept alive in a global map to prevent the OS from +/// reassigning the port before the test node binds to it. pub fn reserve_local_port() -> anyhow::Result { const MAX_ATTEMPTS: usize = 128; for _ in 0..MAX_ATTEMPTS { - let listener = std::net::TcpListener::bind(("127.0.0.1", 0)) - .map_err(|e| anyhow::anyhow!("failed to bind ephemeral port: {e}"))?; - let port = listener + // Bind UDP first since that's what Freenet nodes primarily use + let udp_socket = std::net::UdpSocket::bind(("127.0.0.1", 0)) + .map_err(|e| anyhow::anyhow!("failed to bind ephemeral UDP port: {e}"))?; + let port = udp_socket .local_addr() .map_err(|e| anyhow::anyhow!("failed to read ephemeral port address: {e}"))? .port(); - drop(listener); + + // Also bind TCP on the same port for WebSocket listeners + let tcp_listener = match std::net::TcpListener::bind(("127.0.0.1", port)) { + Ok(l) => l, + Err(_) => continue, // Port available for UDP but not TCP, try another + }; if RESERVED_PORTS.insert(port) { + // Keep sockets alive to prevent OS from reassigning the port + RESERVED_SOCKETS.insert(port, (udp_socket, tcp_listener)); return Ok(port); } } @@ -844,8 +859,12 @@ pub fn reserve_local_port() -> anyhow::Result { } /// Release a previously reserved port so future tests may reuse it. +/// +/// This drops the held sockets, allowing the OS to reclaim the port. 
pub fn release_local_port(port: u16) { RESERVED_PORTS.remove(&port); + // Dropping the sockets releases the port back to the OS + RESERVED_SOCKETS.remove(&port); } // Test context for integration tests diff --git a/crates/freenet-macros/src/codegen.rs b/crates/freenet-macros/src/codegen.rs index 3f29221bc..a0e7be73e 100644 --- a/crates/freenet-macros/src/codegen.rs +++ b/crates/freenet-macros/src/codegen.rs @@ -196,7 +196,11 @@ fn generate_node_setup(args: &FreenetTestArgs) -> TokenStream { if is_gw { // Gateway node configuration let location_expr = node_location(args, idx, node_label); + let network_port_var = format_ident!("network_port_{}", idx); + let ws_port_var_local = format_ident!("ws_port_{}", idx); setup_code.push(quote! { + let #network_port_var = freenet::test_utils::reserve_local_port()?; + let #ws_port_var_local = freenet::test_utils::reserve_local_port()?; let (#config_var, #temp_var) = { let temp_dir = tempfile::tempdir()?; let key = freenet::dev_tool::TransportKeypair::new(); @@ -204,8 +208,8 @@ fn generate_node_setup(args: &FreenetTestArgs) -> TokenStream { key.save(&transport_keypair)?; key.public().save(temp_dir.path().join("public.pem"))?; - let network_port = freenet::test_utils::reserve_local_port()?; - let ws_port = freenet::test_utils::reserve_local_port()?; + let network_port = #network_port_var; + let ws_port = #ws_port_var_local; let location: f64 = #location_expr; @@ -311,7 +315,11 @@ fn generate_node_setup(args: &FreenetTestArgs) -> TokenStream { }; // Peer node configuration + let network_port_var = format_ident!("network_port_{}", idx); + let ws_port_var_local = format_ident!("ws_port_{}", idx); setup_code.push(quote! { + let #network_port_var = freenet::test_utils::reserve_local_port()?; + let #ws_port_var_local = freenet::test_utils::reserve_local_port()?; let (#config_var, #temp_var) = { let temp_dir = tempfile::tempdir()?; let key = freenet::dev_tool::TransportKeypair::new(); @@ -319,8 +327,8 @@ fn generate_node_setup(args: &FreenetTestArgs) -> TokenStream { key.save(&transport_keypair)?; key.public().save(temp_dir.path().join("public.pem"))?; - let network_port = freenet::test_utils::reserve_local_port()?; - let ws_port = freenet::test_utils::reserve_local_port()?; + let network_port = #network_port_var; + let ws_port = #ws_port_var_local; let location: f64 = #location_expr; @@ -397,9 +405,14 @@ fn generate_node_builds(args: &FreenetTestArgs) -> TokenStream { let node_var = format_ident!("node_{}", idx); let flush_handle_var = format_ident!("flush_handle_{}", idx); let config_var = format_ident!("config_{}", idx); + let network_port_var = format_ident!("network_port_{}", idx); + let ws_port_var = format_ident!("ws_port_{}", idx); builds.push(quote! { tracing::info!("Building node: {}", #node_label); + // Release reserved ports just before binding to minimize race window + freenet::test_utils::release_local_port(#network_port_var); + freenet::test_utils::release_local_port(#ws_port_var); let built_config = #config_var.build().await?; let mut node_config = freenet::local_node::NodeConfig::new(built_config.clone()).await?; #connection_tuning @@ -444,18 +457,17 @@ fn generate_node_tasks(args: &FreenetTestArgs) -> TokenStream { } /// Extract values from configs before they're moved +/// Note: ws_port and network_port are already defined at top level by generate_config_setup, +/// so we only need to extract location here. 
fn generate_value_extraction(args: &FreenetTestArgs) -> TokenStream { let mut value_extractions = Vec::new(); for (idx, _node_label) in args.nodes.iter().enumerate() { let config_var = format_ident!("config_{}", idx); - let ws_port_var = format_ident!("ws_port_{}", idx); - let network_port_var = format_ident!("network_port_{}", idx); let location_var = format_ident!("location_{}", idx); value_extractions.push(quote! { - let #ws_port_var = #config_var.ws_api.ws_api_port.unwrap(); - let #network_port_var = #config_var.network_api.public_port; + // ws_port_X and network_port_X already exist from reserve_local_port calls let #location_var = #config_var.network_api.location.unwrap(); }); } @@ -483,7 +495,7 @@ fn generate_context_creation_with_handles(args: &FreenetTestArgs) -> TokenStream label: #node_label.to_string(), temp_dir_path: #temp_var.path().to_path_buf(), ws_port: #ws_port_var, - network_port: #network_port_var, + network_port: Some(#network_port_var), is_gateway: #is_gw, location: #location_var, } From aaf3e3bbcd7a9a6189d28c892dfe2f0063a0affe Mon Sep 17 00:00:00 2001 From: Ian Clarke Date: Mon, 1 Dec 2025 13:18:41 -0600 Subject: [PATCH 31/32] refactor: tighten address typing for connect/network bridge --- crates/core/src/node/network_bridge.rs | 5 +- .../core/src/node/network_bridge/in_memory.rs | 11 ++-- .../src/node/network_bridge/p2p_protoc.rs | 20 +++---- crates/core/src/operations/connect.rs | 53 +++++++++---------- crates/core/src/operations/mod.rs | 11 ++-- crates/core/src/operations/put.rs | 16 ++++-- crates/core/src/operations/update.rs | 7 ++- crates/core/src/transport/mod.rs | 4 +- 8 files changed, 68 insertions(+), 59 deletions(-) diff --git a/crates/core/src/node/network_bridge.rs b/crates/core/src/node/network_bridge.rs index d47a955be..482d32c69 100644 --- a/crates/core/src/node/network_bridge.rs +++ b/crates/core/src/node/network_bridge.rs @@ -15,6 +15,7 @@ use serde::{Deserialize, Serialize}; use tokio::sync::mpsc::{self, Receiver, Sender}; use crate::message::{NetMessage, NodeEvent}; +use crate::transport::ObservedAddr; mod handshake; pub(crate) mod in_memory; @@ -32,12 +33,12 @@ pub(crate) type ConnResult = std::result::Result; pub(crate) trait NetworkBridge: Send + Sync { fn drop_connection( &mut self, - peer_addr: SocketAddr, + peer_addr: ObservedAddr, ) -> impl Future> + Send; fn send( &self, - target_addr: SocketAddr, + target_addr: ObservedAddr, msg: NetMessage, ) -> impl Future> + Send; } diff --git a/crates/core/src/node/network_bridge/in_memory.rs b/crates/core/src/node/network_bridge/in_memory.rs index ff12ad845..d223b3fda 100644 --- a/crates/core/src/node/network_bridge/in_memory.rs +++ b/crates/core/src/node/network_bridge/in_memory.rs @@ -2,7 +2,6 @@ use std::{ collections::HashMap, io::Cursor, - net::SocketAddr, sync::{Arc, LazyLock}, time::{Duration, Instant}, }; @@ -17,6 +16,7 @@ use crate::{ message::NetMessage, node::{testing_impl::NetworkBridgeExt, NetEventRegister, OpManager, PeerId}, tracing::NetEventLog, + transport::ObservedAddr, }; #[derive(Clone)] @@ -61,19 +61,22 @@ impl MemoryConnManager { } impl NetworkBridge for MemoryConnManager { - async fn send(&self, target_addr: SocketAddr, msg: NetMessage) -> super::ConnResult<()> { + async fn send(&self, target_addr: ObservedAddr, msg: NetMessage) -> super::ConnResult<()> { self.log_register .register_events(NetEventLog::from_outbound_msg(&msg, &self.op_manager.ring)) .await; // Create a temporary PeerId for tracking (in-memory transport uses PeerId internally) - let target_peer = 
PeerId::new(target_addr, self.transport.interface_peer.pub_key.clone()); + let target_peer = PeerId::new( + target_addr.socket_addr(), + self.transport.interface_peer.pub_key.clone(), + ); self.op_manager.sending_transaction(&target_peer, &msg); let msg = bincode::serialize(&msg)?; self.transport.send(target_peer, msg); Ok(()) } - async fn drop_connection(&mut self, _peer_addr: SocketAddr) -> super::ConnResult<()> { + async fn drop_connection(&mut self, _peer_addr: ObservedAddr) -> super::ConnResult<()> { Ok(()) } } diff --git a/crates/core/src/node/network_bridge/p2p_protoc.rs b/crates/core/src/node/network_bridge/p2p_protoc.rs index 7fb6e26e0..f549ab9ff 100644 --- a/crates/core/src/node/network_bridge/p2p_protoc.rs +++ b/crates/core/src/node/network_bridge/p2p_protoc.rs @@ -76,19 +76,19 @@ impl P2pBridge { } impl NetworkBridge for P2pBridge { - async fn drop_connection(&mut self, peer_addr: SocketAddr) -> super::ConnResult<()> { + async fn drop_connection(&mut self, peer_addr: ObservedAddr) -> super::ConnResult<()> { // Find the peer by address and remove it let peer = self .accepted_peers .iter() - .find(|p| p.addr == peer_addr) + .find(|p| p.addr == peer_addr.socket_addr()) .map(|p| p.clone()); if let Some(peer) = peer { self.accepted_peers.remove(&peer); self.ev_listener_tx - .send(Right(NodeEvent::DropConnection(peer_addr))) + .send(Right(NodeEvent::DropConnection(peer_addr.socket_addr()))) .await - .map_err(|_| ConnectionError::SendNotCompleted(peer_addr))?; + .map_err(|_| ConnectionError::SendNotCompleted(peer_addr.socket_addr()))?; self.log_register .register_events(Either::Left(NetEventLog::disconnected( &self.op_manager.ring, @@ -99,7 +99,7 @@ impl NetworkBridge for P2pBridge { Ok(()) } - async fn send(&self, target_addr: SocketAddr, msg: NetMessage) -> super::ConnResult<()> { + async fn send(&self, target_addr: ObservedAddr, msg: NetMessage) -> super::ConnResult<()> { self.log_register .register_events(NetEventLog::from_outbound_msg(&msg, &self.op_manager.ring)) .await; @@ -107,14 +107,14 @@ impl NetworkBridge for P2pBridge { let target = self .accepted_peers .iter() - .find(|p| p.addr == target_addr) + .find(|p| p.addr == target_addr.socket_addr()) .map(|p| p.clone()); if let Some(ref target) = target { self.op_manager.sending_transaction(target, &msg); self.ev_listener_tx .send(Left((target.clone(), Box::new(msg)))) .await - .map_err(|_| ConnectionError::SendNotCompleted(target_addr))?; + .map_err(|_| ConnectionError::SendNotCompleted(target_addr.socket_addr()))?; } else { // No known peer at this address - create a temporary PeerId for the event // This should rarely happen in practice @@ -123,13 +123,13 @@ impl NetworkBridge for P2pBridge { "Sending to unknown peer address - creating temporary PeerId" ); let temp_peer = PeerId::new( - target_addr, + target_addr.socket_addr(), (*self.op_manager.ring.connection_manager.pub_key).clone(), ); self.ev_listener_tx .send(Left((temp_peer, Box::new(msg)))) .await - .map_err(|_| ConnectionError::SendNotCompleted(target_addr))?; + .map_err(|_| ConnectionError::SendNotCompleted(target_addr.socket_addr()))?; } Ok(()) } @@ -381,7 +381,7 @@ impl P2pConnManager { ) = (remote, &mut msg) { if payload.observed_addr.is_none() { - payload.observed_addr = Some(remote_addr); + payload.observed_addr = Some(ObservedAddr::new(remote_addr)); } } // Pass the source address through to operations for routing. 
diff --git a/crates/core/src/operations/connect.rs b/crates/core/src/operations/connect.rs index f63818e0d..ac7008768 100644 --- a/crates/core/src/operations/connect.rs +++ b/crates/core/src/operations/connect.rs @@ -134,8 +134,8 @@ pub(crate) struct ConnectRequest { pub ttl: u8, /// Simple visited set to avoid trivial loops. pub visited: Vec, - /// Socket observed by the gateway/relay for the joiner, if known. - pub observed_addr: Option, + /// Address observed by the gateway/relay for the joiner, if known. + pub observed_addr: Option, } /// Acceptance payload returned by candidates. @@ -184,15 +184,15 @@ impl Joiner { } /// Upgrades an Unknown joiner to Known once we observe their address. - pub fn with_observed_address(&self, addr: SocketAddr) -> Self { + pub fn with_observed_address(&self, addr: ObservedAddr) -> Self { match self { - Joiner::Unknown(key) => Joiner::Known(PeerId::new(addr, key.clone())), + Joiner::Unknown(key) => Joiner::Known(PeerId::new(addr.socket_addr(), key.clone())), Joiner::Known(peer_id) => { // Avoid allocation if address hasn't changed - if peer_id.addr == addr { + if peer_id.addr == addr.socket_addr() { self.clone() } else { - Joiner::Known(PeerId::new(addr, peer_id.pub_key.clone())) + Joiner::Known(PeerId::new(addr.socket_addr(), peer_id.pub_key.clone())) } } } @@ -243,7 +243,7 @@ pub(crate) enum ConnectState { #[derive(Debug, Clone)] pub(crate) struct JoinerState { pub target_connections: usize, - pub observed_address: Option, + pub observed_address: Option, pub accepted: HashSet, pub last_progress: Instant, } @@ -283,7 +283,7 @@ pub(crate) struct RelayActions { pub accept_response: Option, pub expect_connection_from: Option, pub forward: Option<(PeerKeyLocation, ConnectRequest)>, - pub observed_address: Option<(PeerKeyLocation, SocketAddr)>, + pub observed_address: Option<(PeerKeyLocation, ObservedAddr)>, /// The target to send the ConnectResponse to (with observed external address). 
pub response_target: Option, } @@ -588,7 +588,7 @@ impl JoinerState { acceptance } - pub(crate) fn update_observed_address(&mut self, address: SocketAddr, now: Instant) { + pub(crate) fn update_observed_address(&mut self, address: ObservedAddr, now: Instant) { self.observed_address = Some(address); self.last_progress = now; } @@ -639,7 +639,7 @@ impl ConnectOp { id: Transaction, desired_location: Location, target_connections: usize, - observed_address: Option, + observed_address: Option, gateway: Option, backoff: Option, connect_forward_estimator: Arc>, @@ -750,7 +750,7 @@ impl ConnectOp { tx, desired_location, target_connections, - Some(own.addr()), + Some(ObservedAddr::new(own.addr())), Some(target.clone()), None, connect_forward_estimator, @@ -793,7 +793,7 @@ impl ConnectOp { pub(crate) fn handle_observed_address(&mut self, address: SocketAddr, now: Instant) { if let Some(ConnectState::WaitingForResponses(state)) = self.state.as_mut() { - state.update_observed_address(address, now); + state.update_observed_address(ObservedAddr::new(address), now); } } @@ -905,16 +905,13 @@ impl Operation for ConnectOp { let msg = ConnectMsg::ObservedAddress { id: self.id, target: target.clone(), - address, + address: address.socket_addr(), }; // Route through upstream (where the request came from) since we may // not have a direct connection to the target if let Some(upstream) = source_addr { network_bridge - .send( - upstream.socket_addr(), - NetMessage::V1(NetMessageV1::Connect(msg)), - ) + .send(upstream, NetMessage::V1(NetMessageV1::Connect(msg))) .await?; } } @@ -938,7 +935,7 @@ impl Operation for ConnectOp { }; network_bridge .send( - next.addr(), + ObservedAddr::new(next.addr()), NetMessage::V1(NetMessageV1::Connect(forward_msg)), ) .await?; @@ -959,7 +956,7 @@ impl Operation for ConnectOp { if let Some(upstream) = source_addr { network_bridge .send( - upstream.socket_addr(), + upstream, NetMessage::V1(NetMessageV1::Connect(response_msg)), ) .await?; @@ -1054,7 +1051,7 @@ impl Operation for ConnectOp { }; network_bridge .send( - upstream.addr(), + ObservedAddr::new(upstream.addr()), NetMessage::V1(NetMessageV1::Connect(forward_msg)), ) .await?; @@ -1421,7 +1418,7 @@ mod tests { joiner: make_joiner(&joiner), ttl: 3, visited: vec![], - observed_addr: Some(joiner.addr()), + observed_addr: Some(ObservedAddr::new(joiner.addr())), }, forwarded_to: None, observed_sent: false, @@ -1456,7 +1453,7 @@ mod tests { joiner: make_joiner(&joiner), ttl: 2, visited: vec![], - observed_addr: Some(joiner.addr()), + observed_addr: Some(ObservedAddr::new(joiner.addr())), }, forwarded_to: None, observed_sent: false, @@ -1486,10 +1483,10 @@ mod tests { fn relay_emits_observed_address_for_private_joiner() { let self_loc = make_peer(4050); let joiner = make_peer(5050); - let observed_addr = SocketAddr::new( + let observed_addr = ObservedAddr::new(SocketAddr::new( IpAddr::V4(Ipv4Addr::new(203, 0, 113, 10)), joiner.addr().port(), - ); + )); let mut state = RelayState { upstream: joiner.clone(), request: ConnectRequest { @@ -1515,14 +1512,14 @@ mod tests { .observed_address .expect("expected observed address update"); assert_eq!(addr, observed_addr); - assert_eq!(target.addr(), observed_addr); + assert_eq!(target.addr(), observed_addr.socket_addr()); // After handling, the joiner should be upgraded to Known with the observed address let joiner_peer = state .request .joiner .peer_id() .expect("joiner should be Known after observed_addr"); - assert_eq!(joiner_peer.addr, observed_addr); + assert_eq!(joiner_peer.addr, 
observed_addr.socket_addr()); } #[test] @@ -1593,7 +1590,7 @@ mod tests { joiner: make_joiner(&joiner), ttl: 3, visited: vec![joiner.clone()], - observed_addr: Some(joiner.addr()), + observed_addr: Some(ObservedAddr::new(joiner.addr())), }; let tx = Transaction::new::(); @@ -1674,7 +1671,7 @@ mod tests { joiner: make_joiner(&joiner), ttl: 3, visited: vec![], - observed_addr: Some(observed_public_addr), + observed_addr: Some(ObservedAddr::new(observed_public_addr)), }, forwarded_to: None, observed_sent: false, diff --git a/crates/core/src/operations/mod.rs b/crates/core/src/operations/mod.rs index 6462c5aeb..25655ff4e 100644 --- a/crates/core/src/operations/mod.rs +++ b/crates/core/src/operations/mod.rs @@ -107,10 +107,7 @@ where Err(err) => { if let Some(addr) = source_addr { network_bridge - .send( - addr.socket_addr(), - NetMessage::V1(NetMessageV1::Aborted(tx_id)), - ) + .send(addr, NetMessage::V1(NetMessageV1::Aborted(tx_id))) .await?; } return Err(err); @@ -160,7 +157,7 @@ where op_manager.completed(id); if let Some(target) = target_addr { tracing::debug!(%id, ?target, "sending final message to target"); - network_bridge.send(target, msg).await?; + network_bridge.send(ObservedAddr::new(target), msg).await?; } return Ok(Some(updated_state)); } else { @@ -168,7 +165,7 @@ where tracing::debug!(%id, "operation in progress"); if let Some(target) = target_addr { tracing::debug!(%id, ?target, "sending updated op state"); - network_bridge.send(target, msg).await?; + network_bridge.send(ObservedAddr::new(target), msg).await?; op_manager.push(id, updated_state).await?; } else { tracing::debug!(%id, "queueing op state for local processing"); @@ -206,7 +203,7 @@ where if let Some(target) = target_addr { tracing::debug!(%tx_id, ?target, "sending back message to target"); - network_bridge.send(target, msg).await?; + network_bridge.send(ObservedAddr::new(target), msg).await?; } } Ok(OperationResult { diff --git a/crates/core/src/operations/put.rs b/crates/core/src/operations/put.rs index 07d55f998..a0aedb400 100644 --- a/crates/core/src/operations/put.rs +++ b/crates/core/src/operations/put.rs @@ -571,7 +571,10 @@ impl Operation for PutOp { ); conn_manager - .send(upstream.addr(), NetMessage::from(ack)) + .send( + crate::transport::ObservedAddr::new(upstream.addr()), + NetMessage::from(ack), + ) .await?; new_state = None; } @@ -588,7 +591,8 @@ impl Operation for PutOp { contract: contract.clone(), target: peer.clone(), }; - let f = conn_manager.send(peer.addr(), msg.into()); + let f = conn_manager + .send(crate::transport::ObservedAddr::new(peer.addr()), msg.into()); broadcasting.push(f); } @@ -616,7 +620,9 @@ impl Operation for PutOp { err ); // todo: review this, maybe we should just dropping this subscription - conn_manager.drop_connection(peer.addr()).await?; + conn_manager + .drop_connection(crate::transport::ObservedAddr::new(peer.addr())) + .await?; incorrect_results += 1; } @@ -865,7 +871,7 @@ impl Operation for PutOp { for subscriber in old_subscribers { conn_manager .send( - subscriber.addr(), + crate::transport::ObservedAddr::new(subscriber.addr()), NetMessage::V1(NetMessageV1::Unsubscribed { transaction: Transaction::new::(), key: dropped_key, @@ -1510,7 +1516,7 @@ where let _ = conn_manager .send( - peer.addr(), + crate::transport::ObservedAddr::new(peer.addr()), (PutMsg::PutForward { id, sender: own_pkloc, diff --git a/crates/core/src/operations/update.rs b/crates/core/src/operations/update.rs index c69acdd87..a98128506 100644 --- a/crates/core/src/operations/update.rs +++ 
b/crates/core/src/operations/update.rs @@ -586,7 +586,8 @@ impl Operation for UpdateOp { sender: sender.clone(), target: peer.clone(), }; - let f = conn_manager.send(peer.addr(), msg.into()); + let f = conn_manager + .send(crate::transport::ObservedAddr::new(peer.addr()), msg.into()); broadcasting.push(f); } let error_futures = futures::future::join_all(broadcasting) @@ -611,7 +612,9 @@ impl Operation for UpdateOp { err ); // TODO: review this, maybe we should just dropping this subscription - conn_manager.drop_connection(peer.addr()).await?; + conn_manager + .drop_connection(crate::transport::ObservedAddr::new(peer.addr())) + .await?; incorrect_results += 1; } diff --git a/crates/core/src/transport/mod.rs b/crates/core/src/transport/mod.rs index e30deee56..47133a857 100644 --- a/crates/core/src/transport/mod.rs +++ b/crates/core/src/transport/mod.rs @@ -40,7 +40,9 @@ pub(crate) use self::{ /// /// Using a newtype instead of raw `SocketAddr` makes the address semantics explicit /// and prevents accidental confusion with advertised/claimed addresses. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] pub struct ObservedAddr(SocketAddr); impl ObservedAddr { From b4f626db78c24eff5ea5004cc0fdf4ec05ebcd0a Mon Sep 17 00:00:00 2001 From: Ian Clarke Date: Mon, 1 Dec 2025 14:09:42 -0600 Subject: [PATCH 32/32] refactor: move ObservedAddr work to correct PR in stack Reverts commit aaf3e3bb which incorrectly added ObservedAddr newtype in PR #2167. This work belongs in PR #2172 (higher in the stack). Maintains proper PR stack separation and avoids merge conflicts. [AI-assisted - Claude] --- crates/core/src/node/network_bridge.rs | 5 +- .../core/src/node/network_bridge/in_memory.rs | 11 ++-- .../src/node/network_bridge/p2p_protoc.rs | 20 +++---- crates/core/src/operations/connect.rs | 53 ++++++++++--------- crates/core/src/operations/mod.rs | 11 ++-- crates/core/src/operations/put.rs | 16 ++---- crates/core/src/operations/update.rs | 7 +-- crates/core/src/transport/mod.rs | 4 +- 8 files changed, 59 insertions(+), 68 deletions(-) diff --git a/crates/core/src/node/network_bridge.rs b/crates/core/src/node/network_bridge.rs index 482d32c69..d47a955be 100644 --- a/crates/core/src/node/network_bridge.rs +++ b/crates/core/src/node/network_bridge.rs @@ -15,7 +15,6 @@ use serde::{Deserialize, Serialize}; use tokio::sync::mpsc::{self, Receiver, Sender}; use crate::message::{NetMessage, NodeEvent}; -use crate::transport::ObservedAddr; mod handshake; pub(crate) mod in_memory; @@ -33,12 +32,12 @@ pub(crate) type ConnResult = std::result::Result; pub(crate) trait NetworkBridge: Send + Sync { fn drop_connection( &mut self, - peer_addr: ObservedAddr, + peer_addr: SocketAddr, ) -> impl Future> + Send; fn send( &self, - target_addr: ObservedAddr, + target_addr: SocketAddr, msg: NetMessage, ) -> impl Future> + Send; } diff --git a/crates/core/src/node/network_bridge/in_memory.rs b/crates/core/src/node/network_bridge/in_memory.rs index d223b3fda..ff12ad845 100644 --- a/crates/core/src/node/network_bridge/in_memory.rs +++ b/crates/core/src/node/network_bridge/in_memory.rs @@ -2,6 +2,7 @@ use std::{ collections::HashMap, io::Cursor, + net::SocketAddr, sync::{Arc, LazyLock}, time::{Duration, Instant}, }; @@ -16,7 +17,6 @@ use crate::{ message::NetMessage, node::{testing_impl::NetworkBridgeExt, NetEventRegister, OpManager, PeerId}, tracing::NetEventLog, - transport::ObservedAddr, }; #[derive(Clone)] @@ -61,22 
+61,19 @@ impl MemoryConnManager { } impl NetworkBridge for MemoryConnManager { - async fn send(&self, target_addr: ObservedAddr, msg: NetMessage) -> super::ConnResult<()> { + async fn send(&self, target_addr: SocketAddr, msg: NetMessage) -> super::ConnResult<()> { self.log_register .register_events(NetEventLog::from_outbound_msg(&msg, &self.op_manager.ring)) .await; // Create a temporary PeerId for tracking (in-memory transport uses PeerId internally) - let target_peer = PeerId::new( - target_addr.socket_addr(), - self.transport.interface_peer.pub_key.clone(), - ); + let target_peer = PeerId::new(target_addr, self.transport.interface_peer.pub_key.clone()); self.op_manager.sending_transaction(&target_peer, &msg); let msg = bincode::serialize(&msg)?; self.transport.send(target_peer, msg); Ok(()) } - async fn drop_connection(&mut self, _peer_addr: ObservedAddr) -> super::ConnResult<()> { + async fn drop_connection(&mut self, _peer_addr: SocketAddr) -> super::ConnResult<()> { Ok(()) } } diff --git a/crates/core/src/node/network_bridge/p2p_protoc.rs b/crates/core/src/node/network_bridge/p2p_protoc.rs index f549ab9ff..7fb6e26e0 100644 --- a/crates/core/src/node/network_bridge/p2p_protoc.rs +++ b/crates/core/src/node/network_bridge/p2p_protoc.rs @@ -76,19 +76,19 @@ impl P2pBridge { } impl NetworkBridge for P2pBridge { - async fn drop_connection(&mut self, peer_addr: ObservedAddr) -> super::ConnResult<()> { + async fn drop_connection(&mut self, peer_addr: SocketAddr) -> super::ConnResult<()> { // Find the peer by address and remove it let peer = self .accepted_peers .iter() - .find(|p| p.addr == peer_addr.socket_addr()) + .find(|p| p.addr == peer_addr) .map(|p| p.clone()); if let Some(peer) = peer { self.accepted_peers.remove(&peer); self.ev_listener_tx - .send(Right(NodeEvent::DropConnection(peer_addr.socket_addr()))) + .send(Right(NodeEvent::DropConnection(peer_addr))) .await - .map_err(|_| ConnectionError::SendNotCompleted(peer_addr.socket_addr()))?; + .map_err(|_| ConnectionError::SendNotCompleted(peer_addr))?; self.log_register .register_events(Either::Left(NetEventLog::disconnected( &self.op_manager.ring, @@ -99,7 +99,7 @@ impl NetworkBridge for P2pBridge { Ok(()) } - async fn send(&self, target_addr: ObservedAddr, msg: NetMessage) -> super::ConnResult<()> { + async fn send(&self, target_addr: SocketAddr, msg: NetMessage) -> super::ConnResult<()> { self.log_register .register_events(NetEventLog::from_outbound_msg(&msg, &self.op_manager.ring)) .await; @@ -107,14 +107,14 @@ impl NetworkBridge for P2pBridge { let target = self .accepted_peers .iter() - .find(|p| p.addr == target_addr.socket_addr()) + .find(|p| p.addr == target_addr) .map(|p| p.clone()); if let Some(ref target) = target { self.op_manager.sending_transaction(target, &msg); self.ev_listener_tx .send(Left((target.clone(), Box::new(msg)))) .await - .map_err(|_| ConnectionError::SendNotCompleted(target_addr.socket_addr()))?; + .map_err(|_| ConnectionError::SendNotCompleted(target_addr))?; } else { // No known peer at this address - create a temporary PeerId for the event // This should rarely happen in practice @@ -123,13 +123,13 @@ impl NetworkBridge for P2pBridge { "Sending to unknown peer address - creating temporary PeerId" ); let temp_peer = PeerId::new( - target_addr.socket_addr(), + target_addr, (*self.op_manager.ring.connection_manager.pub_key).clone(), ); self.ev_listener_tx .send(Left((temp_peer, Box::new(msg)))) .await - .map_err(|_| ConnectionError::SendNotCompleted(target_addr.socket_addr()))?; + .map_err(|_| 
ConnectionError::SendNotCompleted(target_addr))?; } Ok(()) } @@ -381,7 +381,7 @@ impl P2pConnManager { ) = (remote, &mut msg) { if payload.observed_addr.is_none() { - payload.observed_addr = Some(ObservedAddr::new(remote_addr)); + payload.observed_addr = Some(remote_addr); } } // Pass the source address through to operations for routing. diff --git a/crates/core/src/operations/connect.rs b/crates/core/src/operations/connect.rs index ac7008768..f63818e0d 100644 --- a/crates/core/src/operations/connect.rs +++ b/crates/core/src/operations/connect.rs @@ -134,8 +134,8 @@ pub(crate) struct ConnectRequest { pub ttl: u8, /// Simple visited set to avoid trivial loops. pub visited: Vec, - /// Address observed by the gateway/relay for the joiner, if known. - pub observed_addr: Option, + /// Socket observed by the gateway/relay for the joiner, if known. + pub observed_addr: Option, } /// Acceptance payload returned by candidates. @@ -184,15 +184,15 @@ impl Joiner { } /// Upgrades an Unknown joiner to Known once we observe their address. - pub fn with_observed_address(&self, addr: ObservedAddr) -> Self { + pub fn with_observed_address(&self, addr: SocketAddr) -> Self { match self { - Joiner::Unknown(key) => Joiner::Known(PeerId::new(addr.socket_addr(), key.clone())), + Joiner::Unknown(key) => Joiner::Known(PeerId::new(addr, key.clone())), Joiner::Known(peer_id) => { // Avoid allocation if address hasn't changed - if peer_id.addr == addr.socket_addr() { + if peer_id.addr == addr { self.clone() } else { - Joiner::Known(PeerId::new(addr.socket_addr(), peer_id.pub_key.clone())) + Joiner::Known(PeerId::new(addr, peer_id.pub_key.clone())) } } } @@ -243,7 +243,7 @@ pub(crate) enum ConnectState { #[derive(Debug, Clone)] pub(crate) struct JoinerState { pub target_connections: usize, - pub observed_address: Option, + pub observed_address: Option, pub accepted: HashSet, pub last_progress: Instant, } @@ -283,7 +283,7 @@ pub(crate) struct RelayActions { pub accept_response: Option, pub expect_connection_from: Option, pub forward: Option<(PeerKeyLocation, ConnectRequest)>, - pub observed_address: Option<(PeerKeyLocation, ObservedAddr)>, + pub observed_address: Option<(PeerKeyLocation, SocketAddr)>, /// The target to send the ConnectResponse to (with observed external address). 
pub response_target: Option, } @@ -588,7 +588,7 @@ impl JoinerState { acceptance } - pub(crate) fn update_observed_address(&mut self, address: ObservedAddr, now: Instant) { + pub(crate) fn update_observed_address(&mut self, address: SocketAddr, now: Instant) { self.observed_address = Some(address); self.last_progress = now; } @@ -639,7 +639,7 @@ impl ConnectOp { id: Transaction, desired_location: Location, target_connections: usize, - observed_address: Option, + observed_address: Option, gateway: Option, backoff: Option, connect_forward_estimator: Arc>, @@ -750,7 +750,7 @@ impl ConnectOp { tx, desired_location, target_connections, - Some(ObservedAddr::new(own.addr())), + Some(own.addr()), Some(target.clone()), None, connect_forward_estimator, @@ -793,7 +793,7 @@ impl ConnectOp { pub(crate) fn handle_observed_address(&mut self, address: SocketAddr, now: Instant) { if let Some(ConnectState::WaitingForResponses(state)) = self.state.as_mut() { - state.update_observed_address(ObservedAddr::new(address), now); + state.update_observed_address(address, now); } } @@ -905,13 +905,16 @@ impl Operation for ConnectOp { let msg = ConnectMsg::ObservedAddress { id: self.id, target: target.clone(), - address: address.socket_addr(), + address, }; // Route through upstream (where the request came from) since we may // not have a direct connection to the target if let Some(upstream) = source_addr { network_bridge - .send(upstream, NetMessage::V1(NetMessageV1::Connect(msg))) + .send( + upstream.socket_addr(), + NetMessage::V1(NetMessageV1::Connect(msg)), + ) .await?; } } @@ -935,7 +938,7 @@ impl Operation for ConnectOp { }; network_bridge .send( - ObservedAddr::new(next.addr()), + next.addr(), NetMessage::V1(NetMessageV1::Connect(forward_msg)), ) .await?; @@ -956,7 +959,7 @@ impl Operation for ConnectOp { if let Some(upstream) = source_addr { network_bridge .send( - upstream, + upstream.socket_addr(), NetMessage::V1(NetMessageV1::Connect(response_msg)), ) .await?; @@ -1051,7 +1054,7 @@ impl Operation for ConnectOp { }; network_bridge .send( - ObservedAddr::new(upstream.addr()), + upstream.addr(), NetMessage::V1(NetMessageV1::Connect(forward_msg)), ) .await?; @@ -1418,7 +1421,7 @@ mod tests { joiner: make_joiner(&joiner), ttl: 3, visited: vec![], - observed_addr: Some(ObservedAddr::new(joiner.addr())), + observed_addr: Some(joiner.addr()), }, forwarded_to: None, observed_sent: false, @@ -1453,7 +1456,7 @@ mod tests { joiner: make_joiner(&joiner), ttl: 2, visited: vec![], - observed_addr: Some(ObservedAddr::new(joiner.addr())), + observed_addr: Some(joiner.addr()), }, forwarded_to: None, observed_sent: false, @@ -1483,10 +1486,10 @@ mod tests { fn relay_emits_observed_address_for_private_joiner() { let self_loc = make_peer(4050); let joiner = make_peer(5050); - let observed_addr = ObservedAddr::new(SocketAddr::new( + let observed_addr = SocketAddr::new( IpAddr::V4(Ipv4Addr::new(203, 0, 113, 10)), joiner.addr().port(), - )); + ); let mut state = RelayState { upstream: joiner.clone(), request: ConnectRequest { @@ -1512,14 +1515,14 @@ mod tests { .observed_address .expect("expected observed address update"); assert_eq!(addr, observed_addr); - assert_eq!(target.addr(), observed_addr.socket_addr()); + assert_eq!(target.addr(), observed_addr); // After handling, the joiner should be upgraded to Known with the observed address let joiner_peer = state .request .joiner .peer_id() .expect("joiner should be Known after observed_addr"); - assert_eq!(joiner_peer.addr, observed_addr.socket_addr()); + 
assert_eq!(joiner_peer.addr, observed_addr); } #[test] @@ -1590,7 +1593,7 @@ mod tests { joiner: make_joiner(&joiner), ttl: 3, visited: vec![joiner.clone()], - observed_addr: Some(ObservedAddr::new(joiner.addr())), + observed_addr: Some(joiner.addr()), }; let tx = Transaction::new::(); @@ -1671,7 +1674,7 @@ mod tests { joiner: make_joiner(&joiner), ttl: 3, visited: vec![], - observed_addr: Some(ObservedAddr::new(observed_public_addr)), + observed_addr: Some(observed_public_addr), }, forwarded_to: None, observed_sent: false, diff --git a/crates/core/src/operations/mod.rs b/crates/core/src/operations/mod.rs index 25655ff4e..6462c5aeb 100644 --- a/crates/core/src/operations/mod.rs +++ b/crates/core/src/operations/mod.rs @@ -107,7 +107,10 @@ where Err(err) => { if let Some(addr) = source_addr { network_bridge - .send(addr, NetMessage::V1(NetMessageV1::Aborted(tx_id))) + .send( + addr.socket_addr(), + NetMessage::V1(NetMessageV1::Aborted(tx_id)), + ) .await?; } return Err(err); @@ -157,7 +160,7 @@ where op_manager.completed(id); if let Some(target) = target_addr { tracing::debug!(%id, ?target, "sending final message to target"); - network_bridge.send(ObservedAddr::new(target), msg).await?; + network_bridge.send(target, msg).await?; } return Ok(Some(updated_state)); } else { @@ -165,7 +168,7 @@ where tracing::debug!(%id, "operation in progress"); if let Some(target) = target_addr { tracing::debug!(%id, ?target, "sending updated op state"); - network_bridge.send(ObservedAddr::new(target), msg).await?; + network_bridge.send(target, msg).await?; op_manager.push(id, updated_state).await?; } else { tracing::debug!(%id, "queueing op state for local processing"); @@ -203,7 +206,7 @@ where if let Some(target) = target_addr { tracing::debug!(%tx_id, ?target, "sending back message to target"); - network_bridge.send(ObservedAddr::new(target), msg).await?; + network_bridge.send(target, msg).await?; } } Ok(OperationResult { diff --git a/crates/core/src/operations/put.rs b/crates/core/src/operations/put.rs index a0aedb400..07d55f998 100644 --- a/crates/core/src/operations/put.rs +++ b/crates/core/src/operations/put.rs @@ -571,10 +571,7 @@ impl Operation for PutOp { ); conn_manager - .send( - crate::transport::ObservedAddr::new(upstream.addr()), - NetMessage::from(ack), - ) + .send(upstream.addr(), NetMessage::from(ack)) .await?; new_state = None; } @@ -591,8 +588,7 @@ impl Operation for PutOp { contract: contract.clone(), target: peer.clone(), }; - let f = conn_manager - .send(crate::transport::ObservedAddr::new(peer.addr()), msg.into()); + let f = conn_manager.send(peer.addr(), msg.into()); broadcasting.push(f); } @@ -620,9 +616,7 @@ impl Operation for PutOp { err ); // todo: review this, maybe we should just dropping this subscription - conn_manager - .drop_connection(crate::transport::ObservedAddr::new(peer.addr())) - .await?; + conn_manager.drop_connection(peer.addr()).await?; incorrect_results += 1; } @@ -871,7 +865,7 @@ impl Operation for PutOp { for subscriber in old_subscribers { conn_manager .send( - crate::transport::ObservedAddr::new(subscriber.addr()), + subscriber.addr(), NetMessage::V1(NetMessageV1::Unsubscribed { transaction: Transaction::new::(), key: dropped_key, @@ -1516,7 +1510,7 @@ where let _ = conn_manager .send( - crate::transport::ObservedAddr::new(peer.addr()), + peer.addr(), (PutMsg::PutForward { id, sender: own_pkloc, diff --git a/crates/core/src/operations/update.rs b/crates/core/src/operations/update.rs index a98128506..c69acdd87 100644 --- a/crates/core/src/operations/update.rs 
+++ b/crates/core/src/operations/update.rs @@ -586,8 +586,7 @@ impl Operation for UpdateOp { sender: sender.clone(), target: peer.clone(), }; - let f = conn_manager - .send(crate::transport::ObservedAddr::new(peer.addr()), msg.into()); + let f = conn_manager.send(peer.addr(), msg.into()); broadcasting.push(f); } let error_futures = futures::future::join_all(broadcasting) @@ -612,9 +611,7 @@ impl Operation for UpdateOp { err ); // TODO: review this, maybe we should just dropping this subscription - conn_manager - .drop_connection(crate::transport::ObservedAddr::new(peer.addr())) - .await?; + conn_manager.drop_connection(peer.addr()).await?; incorrect_results += 1; } diff --git a/crates/core/src/transport/mod.rs b/crates/core/src/transport/mod.rs index 47133a857..e30deee56 100644 --- a/crates/core/src/transport/mod.rs +++ b/crates/core/src/transport/mod.rs @@ -40,9 +40,7 @@ pub(crate) use self::{ /// /// Using a newtype instead of raw `SocketAddr` makes the address semantics explicit /// and prevents accidental confusion with advertised/claimed addresses. -use serde::{Deserialize, Serialize}; - -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub struct ObservedAddr(SocketAddr); impl ObservedAddr {
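
For readers following the stack: the sketch below approximates the API surface this revert leaves behind. It is a simplified, self-contained illustration rather than the crate's actual code; the real NetworkBridge methods are async and return ConnResult<()> futures, NetMessage is the crate's message enum, and LoggingBridge is a hypothetical stand-in used only here. What it shows is that send/drop_connection take a plain SocketAddr again, while the ObservedAddr newtype stays behind in transport/mod.rs (minus the serde derives) for the follow-up work in PR #2172.

use std::net::{IpAddr, Ipv4Addr, SocketAddr};

// Stand-ins for crate types that are out of scope for this sketch
// (the real send/drop_connection are async and return ConnResult<()> futures).
type ConnResult<T> = Result<T, String>;
struct NetMessage(Vec<u8>);

// The newtype kept in transport/mod.rs after this patch: no serde derives,
// just an explicit wrapper for "address as observed on the wire".
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
struct ObservedAddr(SocketAddr);

impl ObservedAddr {
    fn new(addr: SocketAddr) -> Self {
        ObservedAddr(addr)
    }
    fn socket_addr(&self) -> SocketAddr {
        self.0
    }
}

// Post-revert bridge surface: plain SocketAddr parameters again.
trait NetworkBridge {
    fn send(&self, target_addr: SocketAddr, msg: NetMessage) -> ConnResult<()>;
    fn drop_connection(&mut self, peer_addr: SocketAddr) -> ConnResult<()>;
}

struct LoggingBridge;

impl NetworkBridge for LoggingBridge {
    fn send(&self, target_addr: SocketAddr, msg: NetMessage) -> ConnResult<()> {
        println!("send {} bytes to {}", msg.0.len(), target_addr);
        Ok(())
    }
    fn drop_connection(&mut self, peer_addr: SocketAddr) -> ConnResult<()> {
        println!("drop connection to {}", peer_addr);
        Ok(())
    }
}

fn main() -> ConnResult<()> {
    let mut bridge = LoggingBridge;
    let observed = ObservedAddr::new(SocketAddr::new(
        IpAddr::V4(Ipv4Addr::new(203, 0, 113, 10)),
        31337,
    ));
    // Call sites unwrap to a raw address (the crate's peer.addr()) instead of
    // passing ObservedAddr through the bridge as the reverted commit did.
    bridge.send(observed.socket_addr(), NetMessage(vec![0u8; 16]))?;
    bridge.drop_connection(observed.socket_addr())
}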