From dd83c341f7fcc2c9534cf35929e8be5b745ea16a Mon Sep 17 00:00:00 2001 From: SDartayet Date: Mon, 25 Aug 2025 16:48:09 -0300 Subject: [PATCH 01/55] Changed fixtures to Fusaka ones and added basic support in the runners --- cmd/ef_tests/blockchain/.fixtures_url | 2 +- cmd/ef_tests/blockchain/network.rs | 37 +++++++++++++++++++++ cmd/ef_tests/blockchain/test_runner.rs | 1 + cmd/ef_tests/state/.fixtures_url | 2 +- cmd/ef_tests/state/deserialize.rs | 1 + cmd/ef_tests/state_v2/src/modules/runner.rs | 3 ++ crates/common/types/genesis.rs | 9 +++++ tooling/hive_report/src/main.rs | 3 ++ 8 files changed, 56 insertions(+), 2 deletions(-) diff --git a/cmd/ef_tests/blockchain/.fixtures_url b/cmd/ef_tests/blockchain/.fixtures_url index 52caca16d55..b116ae3557b 100644 --- a/cmd/ef_tests/blockchain/.fixtures_url +++ b/cmd/ef_tests/blockchain/.fixtures_url @@ -1 +1 @@ -https://github.com/ethereum/execution-spec-tests/releases/download/v4.5.0/fixtures_develop.tar.gz +https://github.com/ethereum/execution-spec-tests/releases/download/fusaka-devnet-5%40v1.1.0/fixtures_fusaka-devnet-5.tar.gz diff --git a/cmd/ef_tests/blockchain/network.rs b/cmd/ef_tests/blockchain/network.rs index 03dd5c87dd7..64eafc5293d 100644 --- a/cmd/ef_tests/blockchain/network.rs +++ b/cmd/ef_tests/blockchain/network.rs @@ -53,6 +53,33 @@ lazy_static! 
{ prague_time: Some(0), ..*CANCUN_TO_PRAGUE_AT_15K_CONFIG }; + + pub static ref PRAGUE_TO_OSAKA_AT_15K_CONFIG: ChainConfig = ChainConfig { + osaka_time: Some(0x3a98), + ..*PRAGUE_CONFIG + + }; + + pub static ref OSAKA_CONFIG: ChainConfig = ChainConfig { + osaka_time: Some(0), + ..*PRAGUE_CONFIG + }; + + pub static ref OSAKA_TO_BPO1_AT_15K_CONFIG: ChainConfig = ChainConfig { + bpo1_time: Some(0x3a98), + ..*OSAKA_CONFIG + }; + + pub static ref BPO1_TO_BPO2_AT_15K_CONFIG: ChainConfig = ChainConfig { + bpo2_time: Some(0x3a98), + ..*PRAGUE_CONFIG + }; + + pub static ref BPO2_TO_BPO3_AT_15K_CONFIG: ChainConfig = ChainConfig { + bpo3_time: Some(0x3a98), + ..*PRAGUE_CONFIG + }; + } #[derive(Debug, Deserialize, PartialEq, Eq, PartialOrd, Ord)] @@ -73,6 +100,11 @@ pub enum Network { Cancun = 11, CancunToPragueAtTime15k = 12, Prague = 13, + PragueToOsakaAtTime15k = 14, + Osaka = 15, + OsakaToBPO1AtTime15k = 16, + BPO1ToBPO2AtTime15k = 17, + BPO2ToBPO3AtTime15k = 18, } impl Network { @@ -85,6 +117,11 @@ impl Network { Network::Cancun => &CANCUN_CONFIG, Network::CancunToPragueAtTime15k => &CANCUN_TO_PRAGUE_AT_15K_CONFIG, Network::Prague => &PRAGUE_CONFIG, + Network::PragueToOsakaAtTime15k => &PRAGUE_TO_OSAKA_AT_15K_CONFIG, + Network::Osaka => &OSAKA_CONFIG, + Network::OsakaToBPO1AtTime15k => &OSAKA_TO_BPO1_AT_15K_CONFIG, + Network::BPO1ToBPO2AtTime15k => &BPO1_TO_BPO2_AT_15K_CONFIG, + Network::BPO2ToBPO3AtTime15k => &BPO2_TO_BPO3_AT_15K_CONFIG, Network::Frontier | Network::Homestead | Network::ConstantinopleFix diff --git a/cmd/ef_tests/blockchain/test_runner.rs b/cmd/ef_tests/blockchain/test_runner.rs index c72c2d16d84..56c14679f99 100644 --- a/cmd/ef_tests/blockchain/test_runner.rs +++ b/cmd/ef_tests/blockchain/test_runner.rs @@ -36,6 +36,7 @@ pub fn parse_and_execute( for (test_key, test) in tests { let should_skip_test = test.network < Network::Merge + || test.network > Network::Prague || skipped_tests .map(|skipped| skipped.iter().any(|s| test_key.contains(s))) 
.unwrap_or(false); diff --git a/cmd/ef_tests/state/.fixtures_url b/cmd/ef_tests/state/.fixtures_url index 52caca16d55..b116ae3557b 100644 --- a/cmd/ef_tests/state/.fixtures_url +++ b/cmd/ef_tests/state/.fixtures_url @@ -1 +1 @@ -https://github.com/ethereum/execution-spec-tests/releases/download/v4.5.0/fixtures_develop.tar.gz +https://github.com/ethereum/execution-spec-tests/releases/download/fusaka-devnet-5%40v1.1.0/fixtures_fusaka-devnet-5.tar.gz diff --git a/cmd/ef_tests/state/deserialize.rs b/cmd/ef_tests/state/deserialize.rs index 217be3f02e9..4d95edbfbcb 100644 --- a/cmd/ef_tests/state/deserialize.rs +++ b/cmd/ef_tests/state/deserialize.rs @@ -303,6 +303,7 @@ where "Shanghai" => Fork::Shanghai, "Cancun" => Fork::Cancun, "Prague" => Fork::Prague, + "Osaka" => Fork::Osaka, "Byzantium" => Fork::Byzantium, "EIP158" => Fork::SpuriousDragon, "EIP150" => Fork::Tangerine, diff --git a/cmd/ef_tests/state_v2/src/modules/runner.rs b/cmd/ef_tests/state_v2/src/modules/runner.rs index abcc4836d96..6825fd33bfa 100644 --- a/cmd/ef_tests/state_v2/src/modules/runner.rs +++ b/cmd/ef_tests/state_v2/src/modules/runner.rs @@ -39,6 +39,9 @@ pub async fn run_tests(tests: Vec) -> Result<(), RunnerError> { let mut total_run = 0; for test in tests { + if test.path.starts_with("osaka") { + continue; + } run_test( &test, &mut passing_tests, diff --git a/crates/common/types/genesis.rs b/crates/common/types/genesis.rs index 3d935d118d4..18315298048 100644 --- a/crates/common/types/genesis.rs +++ b/crates/common/types/genesis.rs @@ -188,6 +188,12 @@ pub struct ChainConfig { pub verkle_time: Option, pub osaka_time: Option, + pub bpo1_time: Option, + pub bpo2_time: Option, + pub bpo3_time: Option, + pub bpo4_time: Option, + pub bpo5_time: Option, + /// Amount of total difficulty reached by the network that triggers the consensus upgrade. 
pub terminal_total_difficulty: Option, /// Network has already passed the terminal total difficult @@ -198,6 +204,9 @@ pub struct ChainConfig { #[rkyv(with = rkyv_utils::H160Wrapper)] // Deposits system contract address pub deposit_contract_address: Address, + + #[serde(default)] + pub enable_verkle_at_genesis: bool, } #[repr(u8)] diff --git a/tooling/hive_report/src/main.rs b/tooling/hive_report/src/main.rs index 4db06a631fa..2a1d6bf7c20 100644 --- a/tooling/hive_report/src/main.rs +++ b/tooling/hive_report/src/main.rs @@ -137,10 +137,13 @@ fn main() -> Result<(), Box> { // Prague let result_prague = create_fork_result(&json_data, "Prague", "fork_Prague"); + let result_osaka = create_fork_result(&json_data, "Osaka", "fork_Osaka"); + results.push(result_paris); results.push(result_shanghai); results.push(result_cancun); results.push(result_prague); + results.push(result_osaka); } else { let total_tests = json_data.test_cases.len(); let passed_tests = json_data From 0f3d680663bb16ad2a053f04218a0cac20b5ca7c Mon Sep 17 00:00:00 2001 From: SDartayet Date: Tue, 26 Aug 2025 13:36:36 -0300 Subject: [PATCH 02/55] Made it so RPC tests coincide with the updated Chainconfig --- Cargo.lock | 25 +++++++++++++++++++++++++ crates/common/types/genesis.rs | 1 + crates/networking/rpc/lib.rs | 2 ++ crates/networking/rpc/rpc.rs | 6 ++++++ 4 files changed, 34 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 23f8fcdda07..c485fac3742 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8476,6 +8476,29 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "procfs" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "731e0d9356b0c25f16f33b5be79b1c57b562f141ebfcdb0ad8ac2c13a24293b4" +dependencies = [ + "bitflags 2.9.1", + "hex", + "lazy_static", + "procfs-core", + "rustix 0.38.44", +] + +[[package]] +name = "procfs-core" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "2d3554923a69f4ce04c4a754260c338f505ce22642d3830e049a399fc2059a29" +dependencies = [ + "bitflags 2.9.1", + "hex", +] + [[package]] name = "prometheus" version = "0.13.4" @@ -8485,8 +8508,10 @@ dependencies = [ "cfg-if 1.0.1", "fnv", "lazy_static", + "libc", "memchr", "parking_lot 0.12.4", + "procfs", "protobuf", "thiserror 1.0.69", ] diff --git a/crates/common/types/genesis.rs b/crates/common/types/genesis.rs index 18315298048..e9272dd3531 100644 --- a/crates/common/types/genesis.rs +++ b/crates/common/types/genesis.rs @@ -50,6 +50,7 @@ pub struct Genesis { pub blob_gas_used: Option, #[serde(default, with = "crate::serde_utils::u64::hex_str_opt")] pub excess_blob_gas: Option, + pub requests_hash: Option, } diff --git a/crates/networking/rpc/lib.rs b/crates/networking/rpc/lib.rs index 4604b7946c2..47fb52977f7 100644 --- a/crates/networking/rpc/lib.rs +++ b/crates/networking/rpc/lib.rs @@ -1,3 +1,5 @@ +#![recursion_limit = "400"] + mod admin; mod authentication; pub mod debug; diff --git a/crates/networking/rpc/rpc.rs b/crates/networking/rpc/rpc.rs index bf09e3f9de4..a97284fe10f 100644 --- a/crates/networking/rpc/rpc.rs +++ b/crates/networking/rpc/rpc.rs @@ -512,10 +512,16 @@ mod tests { "pragueTime": 1718232101, "verkleTime": null, "osakaTime": null, + "bpo1Time": null, + "bpo2Time": null, + "bpo3Time": null, + "bpo4Time": null, + "bpo5Time": null, "terminalTotalDifficulty": 0, "terminalTotalDifficultyPassed": true, "blobSchedule": blob_schedule, "depositContractAddress": H160::from_str("0x00000000219ab540356cbb839cbe05303d7705fa").unwrap(), + "enableVerkleAtGenesis": false, } }, } From 9c7418895736789b9419f86103cdcdf70b2830fe Mon Sep 17 00:00:00 2001 From: SDartayet Date: Tue, 26 Aug 2025 15:19:52 -0300 Subject: [PATCH 03/55] Updated ordered config in genesis json parser --- cmd/ethrex/build.rs | 1 + tooling/genesis/src/genesis.rs | 1 + 2 files changed, 2 insertions(+) diff --git a/cmd/ethrex/build.rs b/cmd/ethrex/build.rs index 2c9a109f50f..4a9a83b95eb 
100644 --- a/cmd/ethrex/build.rs +++ b/cmd/ethrex/build.rs @@ -367,6 +367,7 @@ pub fn update_genesis_file( l2_genesis_path: &PathBuf, out_dir: &Path, ) -> Result<(), SystemContractsUpdaterError> { + dbg!(l2_genesis_path); let mut genesis = read_genesis_file(l2_genesis_path.to_str().ok_or( SystemContractsUpdaterError::InvalidPath( "Failed to convert l2 genesis path to string".to_string(), diff --git a/tooling/genesis/src/genesis.rs b/tooling/genesis/src/genesis.rs index 62497ba1a61..829b9de4932 100644 --- a/tooling/genesis/src/genesis.rs +++ b/tooling/genesis/src/genesis.rs @@ -32,6 +32,7 @@ fn sort_config(genesis_map: &mut Map) -> Result Date: Tue, 26 Aug 2025 15:22:44 -0300 Subject: [PATCH 04/55] Remove dbg! accidentally left in --- cmd/ethrex/build.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/cmd/ethrex/build.rs b/cmd/ethrex/build.rs index 4a9a83b95eb..2c9a109f50f 100644 --- a/cmd/ethrex/build.rs +++ b/cmd/ethrex/build.rs @@ -367,7 +367,6 @@ pub fn update_genesis_file( l2_genesis_path: &PathBuf, out_dir: &Path, ) -> Result<(), SystemContractsUpdaterError> { - dbg!(l2_genesis_path); let mut genesis = read_genesis_file(l2_genesis_path.to_str().ok_or( SystemContractsUpdaterError::InvalidPath( "Failed to convert l2 genesis path to string".to_string(), From 1ff99ff8796f562c6d49ad79fab5bb5f6a68b221 Mon Sep 17 00:00:00 2001 From: SDartayet <44068466+SDartayet@users.noreply.github.com> Date: Tue, 26 Aug 2025 15:23:07 -0300 Subject: [PATCH 05/55] Update genesis.rs --- crates/common/types/genesis.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/crates/common/types/genesis.rs b/crates/common/types/genesis.rs index e9272dd3531..18315298048 100644 --- a/crates/common/types/genesis.rs +++ b/crates/common/types/genesis.rs @@ -50,7 +50,6 @@ pub struct Genesis { pub blob_gas_used: Option, #[serde(default, with = "crate::serde_utils::u64::hex_str_opt")] pub excess_blob_gas: Option, - pub requests_hash: Option, } From ee01072524eef3269cf92da47c17ead5303399b2 Mon Sep 17 
00:00:00 2001 From: SDartayet Date: Tue, 26 Aug 2025 18:39:39 -0300 Subject: [PATCH 06/55] Further fixes --- tooling/ef_tests/blockchain/test_runner.rs | 8 +++- tooling/ef_tests/blockchain/types.rs | 40 ++++++++++++++++++- tooling/ef_tests/state/runner/levm_runner.rs | 1 + tooling/ef_tests/state/runner/mod.rs | 1 + .../ef_tests/state_v2/src/modules/runner.rs | 10 ++++- 5 files changed, 57 insertions(+), 3 deletions(-) diff --git a/tooling/ef_tests/blockchain/test_runner.rs b/tooling/ef_tests/blockchain/test_runner.rs index 56c14679f99..72ff06dc49a 100644 --- a/tooling/ef_tests/blockchain/test_runner.rs +++ b/tooling/ef_tests/blockchain/test_runner.rs @@ -31,12 +31,18 @@ pub fn parse_and_execute( ) -> datatest_stable::Result<()> { let rt = tokio::runtime::Runtime::new().unwrap(); let tests = parse_tests(path); + //Test with the Fusaka tests that should pass. TODO: Once we've implemented all the Fusaka EIPs this should be removed + //EIPs should be added as strings in the format 'eip-XXXX' + let fusaka_eips_to_test: Vec<&str> = vec![]; let mut failures = Vec::new(); for (test_key, test) in tests { + let test_eip = test.info.clone().reference_spec.unwrap_or_default(); + let should_skip_test = test.network < Network::Merge - || test.network > Network::Prague + || (test.network > Network::Prague + && !fusaka_eips_to_test.iter().any(|eip| test_eip.contains(eip))) || skipped_tests .map(|skipped| skipped.iter().any(|s| test_key.contains(s))) .unwrap_or(false); diff --git a/tooling/ef_tests/blockchain/types.rs b/tooling/ef_tests/blockchain/types.rs index e5e979ebe58..6633e724546 100644 --- a/tooling/ef_tests/blockchain/types.rs +++ b/tooling/ef_tests/blockchain/types.rs @@ -16,7 +16,7 @@ use crate::network::Network; #[serde(rename_all = "camelCase", deny_unknown_fields)] pub struct TestUnit { #[serde(default, rename = "_info")] - pub info: Option, + pub info: Info, pub blocks: Vec, pub genesis_block_header: Header, #[serde(rename = "genesisRLP", with = 
"ethrex_common::serde_utils::bytes")] @@ -29,6 +29,44 @@ pub struct TestUnit { pub config: FixtureConfig, } +/// General information about the test. Matches the `_info` field in the `.json` file. +#[derive(Debug, Deserialize, Clone, Default)] +pub struct Info { + #[serde(default)] + pub comment: Option, + #[serde(rename = "filling-rpc-server", default)] + pub filling_rpc_server: Option, + #[serde(rename = "filling-tool-version", default)] + pub filling_tool_version: Option, + #[serde(rename = "generatedTestHash", default)] + pub generated_test_hash: Option, + #[serde(default)] + pub labels: Option>, + #[serde(default)] + pub lllcversion: Option, + #[serde(default)] + pub solidity: Option, + #[serde(default)] + pub source: Option, + #[serde(rename = "sourceHash", default)] + pub source_hash: Option, + // These fields are implemented in the new version of the test vectors (Prague). + #[serde(rename = "hash", default)] + pub hash: Option, + #[serde(rename = "filling-transition-tool", default)] + pub filling_transition_tool: Option, + #[serde(default)] + pub description: Option, + #[serde(default)] + pub url: Option, + #[serde(rename = "fixture_format", default)] + pub fixture_format: Option, + #[serde(rename = "reference-spec", default)] + pub reference_spec: Option, + #[serde(rename = "reference-spec-version", default)] + pub reference_spec_version: Option, +} + #[derive(Debug, Deserialize)] #[serde(rename_all = "camelCase")] pub struct FixtureConfig { diff --git a/tooling/ef_tests/state/runner/levm_runner.rs b/tooling/ef_tests/state/runner/levm_runner.rs index 8350af1b5ce..c718ecfc34f 100644 --- a/tooling/ef_tests/state/runner/levm_runner.rs +++ b/tooling/ef_tests/state/runner/levm_runner.rs @@ -36,6 +36,7 @@ pub async fn run_ef_test(test: &EFTest) -> Result) -> Result<(), RunnerError> { let mut failing_tests = 0; let mut total_run = 0; + //Test with the Fusaka tests that should pass. 
TODO: Once we've implemented all the Fusaka EIPs this should be removed + //EIPs should be added as strings in the format 'eip-XXXX' + let fusaka_eips_to_test: Vec<&str> = vec!["eip-7939"]; + for test in tests { - if test.path.starts_with("osaka") { + let test_eip = test._info.clone().reference_spec.unwrap_or_default(); + + if test.path.to_str().unwrap().contains("osaka") + && !fusaka_eips_to_test.iter().any(|eip| test_eip.contains(eip)) + { continue; } run_test( From 1d9bd336c132dbdac13d93270645a964adacc0a3 Mon Sep 17 00:00:00 2001 From: SDartayet <44068466+SDartayet@users.noreply.github.com> Date: Tue, 26 Aug 2025 18:40:58 -0300 Subject: [PATCH 07/55] Update levm_runner.rs --- tooling/ef_tests/state/runner/levm_runner.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/tooling/ef_tests/state/runner/levm_runner.rs b/tooling/ef_tests/state/runner/levm_runner.rs index c718ecfc34f..8350af1b5ce 100644 --- a/tooling/ef_tests/state/runner/levm_runner.rs +++ b/tooling/ef_tests/state/runner/levm_runner.rs @@ -36,7 +36,6 @@ pub async fn run_ef_test(test: &EFTest) -> Result Date: Tue, 26 Aug 2025 18:41:34 -0300 Subject: [PATCH 08/55] Update mod.rs --- tooling/ef_tests/state/runner/mod.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/tooling/ef_tests/state/runner/mod.rs b/tooling/ef_tests/state/runner/mod.rs index 8b8eb24f448..650a5531594 100644 --- a/tooling/ef_tests/state/runner/mod.rs +++ b/tooling/ef_tests/state/runner/mod.rs @@ -141,7 +141,6 @@ async fn run_with_levm( if is_not_specific || is_ignored { continue; } - if opts.verbose { println!("Running test: {:?}", test.name); } From 743af2f36805f4f4bc6d235a2f4c123478cf9076 Mon Sep 17 00:00:00 2001 From: SDartayet Date: Wed, 27 Aug 2025 16:18:56 -0300 Subject: [PATCH 09/55] Added better ways to unskip osaka tests --- tooling/ef_tests/blockchain/test_runner.rs | 10 +++++++++- tooling/ef_tests/blockchain/types.rs | 2 +- tooling/ef_tests/state/runner/levm_runner.rs | 21 ++++++++++++++++++++ 
tooling/ef_tests/state/runner/mod.rs | 2 +- 4 files changed, 32 insertions(+), 3 deletions(-) diff --git a/tooling/ef_tests/blockchain/test_runner.rs b/tooling/ef_tests/blockchain/test_runner.rs index 72ff06dc49a..1779f0fb892 100644 --- a/tooling/ef_tests/blockchain/test_runner.rs +++ b/tooling/ef_tests/blockchain/test_runner.rs @@ -11,6 +11,7 @@ use ethrex_blockchain::{ fork_choice::apply_fork_choice, }; use ethrex_common::{ + H256, constants::EMPTY_KECCACK_HASH, types::{ Account as CoreAccount, Block as CoreBlock, BlockHeader as CoreBlockHeader, @@ -35,6 +36,10 @@ pub fn parse_and_execute( //EIPs should be added as strings in the format 'eip-XXXX' let fusaka_eips_to_test: Vec<&str> = vec![]; + //Hashes of any other tests to run, that don't correspond to an especific EIP (for examples, some integration tests) + //We should really remove this once we're finished with implementing Fusaka, but it's a good-enough workaround to run specific tests for now + let hashes_of_fusaka_tests_to_run: Vec<&str> = vec![]; + let mut failures = Vec::new(); for (test_key, test) in tests { @@ -42,7 +47,10 @@ pub fn parse_and_execute( let should_skip_test = test.network < Network::Merge || (test.network > Network::Prague - && !fusaka_eips_to_test.iter().any(|eip| test_eip.contains(eip))) + && !fusaka_eips_to_test.iter().any(|eip| test_eip.contains(eip)) + && !hashes_of_tests_to_run + .iter() + .any(|hash| *hash == test.info.hash.clone().unwrap())) || skipped_tests .map(|skipped| skipped.iter().any(|s| test_key.contains(s))) .unwrap_or(false); diff --git a/tooling/ef_tests/blockchain/types.rs b/tooling/ef_tests/blockchain/types.rs index 6633e724546..11efa67a758 100644 --- a/tooling/ef_tests/blockchain/types.rs +++ b/tooling/ef_tests/blockchain/types.rs @@ -52,7 +52,7 @@ pub struct Info { pub source_hash: Option, // These fields are implemented in the new version of the test vectors (Prague). 
#[serde(rename = "hash", default)] - pub hash: Option, + pub hash: Option, #[serde(rename = "filling-transition-tool", default)] pub filling_transition_tool: Option, #[serde(default)] diff --git a/tooling/ef_tests/state/runner/levm_runner.rs b/tooling/ef_tests/state/runner/levm_runner.rs index 8350af1b5ce..81ed481f6a0 100644 --- a/tooling/ef_tests/state/runner/levm_runner.rs +++ b/tooling/ef_tests/state/runner/levm_runner.rs @@ -36,7 +36,28 @@ pub async fn run_ef_test(test: &EFTest) -> Result = vec![]; + + //Names of any other tests to run, that don't correspond to an especific EIP (for examples, some integration tests) + //We should really remove this once we're finished with implementing Fusaka, but it's a good-enough workaround to run specific tests for now + let names_of_fusaka_tests_to_run: Vec<&str> = vec![]; + + let test_eip = test._info.clone().reference_spec.unwrap_or_default(); + for fork in test.post.forks.keys() { + if fork == &Fork::Osaka + && !fusaka_eips_to_test.iter().any(|eip| test_eip.contains(eip)) + && !names_of_fusaka_tests_to_run + .iter() + .any(|name| *name == test.name) + { + continue; + } + //dbg!(test.name.clone()); + let mut ef_test_report_fork = EFTestReportForkResult::new(); for (vector, _tx) in test.transactions.iter() { diff --git a/tooling/ef_tests/state/runner/mod.rs b/tooling/ef_tests/state/runner/mod.rs index 650a5531594..bddd5d6155f 100644 --- a/tooling/ef_tests/state/runner/mod.rs +++ b/tooling/ef_tests/state/runner/mod.rs @@ -61,7 +61,7 @@ pub struct EFTestRunnerOptions { long, value_name = "FORK", value_delimiter = ',', - default_value = "Merge,Shanghai,Cancun,Prague" + default_value = "Merge,Shanghai,Cancun,Prague,Osaka" )] pub forks: Option>, /// For running specific .json files From 0025aefc607cdea93ce597abd210e58385dd0680 Mon Sep 17 00:00:00 2001 From: SDartayet Date: Wed, 27 Aug 2025 16:20:40 -0300 Subject: [PATCH 10/55] Small fixes to previous commit --- tooling/ef_tests/blockchain/test_runner.rs | 3 +-- 1 file changed, 
1 insertion(+), 2 deletions(-) diff --git a/tooling/ef_tests/blockchain/test_runner.rs b/tooling/ef_tests/blockchain/test_runner.rs index 1779f0fb892..85f77110a57 100644 --- a/tooling/ef_tests/blockchain/test_runner.rs +++ b/tooling/ef_tests/blockchain/test_runner.rs @@ -11,7 +11,6 @@ use ethrex_blockchain::{ fork_choice::apply_fork_choice, }; use ethrex_common::{ - H256, constants::EMPTY_KECCACK_HASH, types::{ Account as CoreAccount, Block as CoreBlock, BlockHeader as CoreBlockHeader, @@ -48,7 +47,7 @@ pub fn parse_and_execute( let should_skip_test = test.network < Network::Merge || (test.network > Network::Prague && !fusaka_eips_to_test.iter().any(|eip| test_eip.contains(eip)) - && !hashes_of_tests_to_run + && !hashes_of_fusaka_tests_to_run .iter() .any(|hash| *hash == test.info.hash.clone().unwrap())) || skipped_tests From 762aba7ec1ad74264aeb4d70f357adb8e1102517 Mon Sep 17 00:00:00 2001 From: SDartayet Date: Wed, 27 Aug 2025 16:25:34 -0300 Subject: [PATCH 11/55] Uncommented line that prints fusaka report in cli --- tooling/ef_tests/state/report.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tooling/ef_tests/state/report.rs b/tooling/ef_tests/state/report.rs index df3c91c2ada..3f4fecc7d41 100644 --- a/tooling/ef_tests/state/report.rs +++ b/tooling/ef_tests/state/report.rs @@ -211,8 +211,7 @@ pub fn summary_for_shell(reports: &[EFTestReport]) -> String { }, // NOTE: Keep in order, see the Fork Enum to check // NOTE: Uncomment the summaries if EF tests for those specific forks exist. 
- - // fork_summary_shell(reports, Fork::Osaka), + fork_summary_shell(reports, Fork::Osaka), fork_summary_shell(reports, Fork::Prague), fork_summary_shell(reports, Fork::Cancun), fork_summary_shell(reports, Fork::Shanghai), From c140755e0519660058d7f3acffd9bf46f401f335 Mon Sep 17 00:00:00 2001 From: SDartayet Date: Wed, 27 Aug 2025 16:29:36 -0300 Subject: [PATCH 12/55] Small fix to previous commit --- tooling/ef_tests/state/report.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tooling/ef_tests/state/report.rs b/tooling/ef_tests/state/report.rs index 3f4fecc7d41..72c9d60bf3e 100644 --- a/tooling/ef_tests/state/report.rs +++ b/tooling/ef_tests/state/report.rs @@ -200,7 +200,7 @@ pub fn summary_for_shell(reports: &[EFTestReport]) -> String { let total_run = total_fork_test_run(reports); let success_percentage = (total_passed as f64 / total_run as f64) * 100.0; format!( - "{} {}/{total_run} ({success_percentage:.2}%)\n\n{}\n{}\n{}\n{}\n\n\n{}\n", + "{} {}/{total_run} ({success_percentage:.2}%)\n\n{}\n{}\n{}\n{}\n{}\n\n\n{}\n", "Summary:".bold(), if total_passed == total_run { format!("{total_passed}").green() From 3effd10d5643329a999fffea4ade0e3c3de3dc11 Mon Sep 17 00:00:00 2001 From: SDartayet Date: Wed, 27 Aug 2025 17:42:32 -0300 Subject: [PATCH 13/55] Added missing report line for Osaka tests --- tooling/ef_tests/state/report.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/tooling/ef_tests/state/report.rs b/tooling/ef_tests/state/report.rs index 72c9d60bf3e..18304d6a523 100644 --- a/tooling/ef_tests/state/report.rs +++ b/tooling/ef_tests/state/report.rs @@ -324,6 +324,7 @@ impl Display for EFTestsReport { let total_run = total_fork_test_run(&self.0); writeln!(f, "Summary: {total_passed}/{total_run}",)?; writeln!(f)?; + writeln!(f, "{}", fork_summary_shell(&self.0, Fork::Osaka))?; writeln!(f, "{}", fork_summary_shell(&self.0, Fork::Prague))?; writeln!(f, "{}", fork_summary_shell(&self.0, Fork::Cancun))?; writeln!(f, "{}", 
fork_summary_shell(&self.0, Fork::Shanghai))?; From 08724e73f3e1022d60417637177231b1637c938c Mon Sep 17 00:00:00 2001 From: SDartayet Date: Wed, 27 Aug 2025 18:16:30 -0300 Subject: [PATCH 14/55] Made it so revm skips osaka tests --- tooling/ef_tests/blockchain/test_runner.rs | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/tooling/ef_tests/blockchain/test_runner.rs b/tooling/ef_tests/blockchain/test_runner.rs index 7cd21fa8238..12b2dee5469 100644 --- a/tooling/ef_tests/blockchain/test_runner.rs +++ b/tooling/ef_tests/blockchain/test_runner.rs @@ -47,10 +47,14 @@ pub fn parse_and_execute( let should_skip_test = test.network < Network::Merge || (test.network > Network::Prague - && !fusaka_eips_to_test.iter().any(|eip| test_eip.contains(eip)) - && !hashes_of_fusaka_tests_to_run - .iter() - .any(|hash| *hash == test.info.hash.clone().unwrap())) + && (!fusaka_eips_to_test.iter().any(|eip| test_eip.contains(eip)) + && !hashes_of_fusaka_tests_to_run + .iter() + .any(|hash| *hash == test.info.hash.clone().unwrap()) + || match evm { + EvmEngine::LEVM => false, + EvmEngine::REVM => true, + })) || skipped_tests .map(|skipped| skipped.iter().any(|s| test_key.contains(s))) .unwrap_or(false); From d04cbcff166027c41448c1b673fbc126c694131d Mon Sep 17 00:00:00 2001 From: SDartayet Date: Thu, 28 Aug 2025 10:28:22 -0300 Subject: [PATCH 15/55] Initial implementation --- crates/blockchain/blockchain.rs | 11 +++++++++-- crates/blockchain/error.rs | 2 ++ crates/common/constants.rs | 6 ++++++ tooling/ef_tests/blockchain/test_runner.rs | 2 +- 4 files changed, 18 insertions(+), 3 deletions(-) diff --git a/crates/blockchain/blockchain.rs b/crates/blockchain/blockchain.rs index e4dc31a3306..82c7de4182c 100644 --- a/crates/blockchain/blockchain.rs +++ b/crates/blockchain/blockchain.rs @@ -12,7 +12,7 @@ use bytes::Bytes; use constants::{MAX_INITCODE_SIZE, MAX_TRANSACTION_DATA_SIZE}; use error::MempoolError; use error::{ChainError, InvalidBlockError}; -use 
ethrex_common::constants::{GAS_PER_BLOB, MIN_BASE_FEE_PER_BLOB_GAS}; +use ethrex_common::constants::{GAS_PER_BLOB, MAX_BLOCK_SIZE, MIN_BASE_FEE_PER_BLOB_GAS}; use ethrex_common::types::block_execution_witness::ExecutionWitnessResult; use ethrex_common::types::requests::{EncodedRequests, Requests, compute_requests_hash}; use ethrex_common::types::{ @@ -951,7 +951,14 @@ pub fn validate_block( validate_block_header(&block.header, parent_header, elasticity_multiplier) .map_err(InvalidBlockError::from)?; - if chain_config.is_prague_activated(block.header.timestamp) { + if chain_config.is_osaka_activated(block.header.timestamp) { + let block_size = std::mem::size_of_val(block) as u64; + if block_size > MAX_BLOCK_SIZE { + return Err(error::ChainError::InvalidBlock( + InvalidBlockError::MaximumSizeExceeded(MAX_BLOCK_SIZE, block_size), + )); + } + } else if chain_config.is_prague_activated(block.header.timestamp) { validate_prague_header_fields(&block.header, parent_header, chain_config) .map_err(InvalidBlockError::from)?; verify_blob_gas_usage(block, chain_config)?; diff --git a/crates/blockchain/error.rs b/crates/blockchain/error.rs index e8eaaa891b5..04d79367fe9 100644 --- a/crates/blockchain/error.rs +++ b/crates/blockchain/error.rs @@ -64,6 +64,8 @@ pub enum InvalidBlockError { BlobGasUsedMismatch, #[error("Invalid transaction: {0}")] InvalidTransaction(String), + #[error("Maximum block size exceeded: Maximum is {0} MiB, but block was {1} MiB")] + MaximumSizeExceeded(u64, u64), } #[derive(Debug, thiserror::Error)] diff --git a/crates/common/constants.rs b/crates/common/constants.rs index 56aab11fe9d..39e0b96c243 100644 --- a/crates/common/constants.rs +++ b/crates/common/constants.rs @@ -57,3 +57,9 @@ pub const GAS_PER_BLOB: u32 = 1 << 17; // Minimum base fee per blob pub const MIN_BASE_FEE_PER_BLOB_GAS: u64 = 1; + +// === EIP-7934 constants === + +pub const MAX_BLOCK_SIZE: u64 = 10_485_760; +pub const SAFETY_MARGIN: u64 = 2_097_152; +pub const MAX_RLP_BLOCK_SIZE: u64 
= MAX_BLOCK_SIZE - SAFETY_MARGIN; diff --git a/tooling/ef_tests/blockchain/test_runner.rs b/tooling/ef_tests/blockchain/test_runner.rs index 12b2dee5469..a480399e8b4 100644 --- a/tooling/ef_tests/blockchain/test_runner.rs +++ b/tooling/ef_tests/blockchain/test_runner.rs @@ -34,7 +34,7 @@ pub fn parse_and_execute( let tests = parse_tests(path); //Test with the Fusaka tests that should pass. TODO: Once we've implemented all the Fusaka EIPs this should be removed //EIPs should be added as strings in the format 'eip-XXXX' - let fusaka_eips_to_test: Vec<&str> = vec![]; + let fusaka_eips_to_test: Vec<&str> = vec!["eip-7934"]; //Hashes of any other tests to run, that don't correspond to an especific EIP (for examples, some integration tests) //We should really remove this once we're finished with implementing Fusaka, but it's a good-enough workaround to run specific tests for now From 645e52fdf18ef31cf327217001d5a176c947f5ae Mon Sep 17 00:00:00 2001 From: SDartayet Date: Fri, 29 Aug 2025 15:37:08 -0300 Subject: [PATCH 16/55] Added initial implementation --- crates/blockchain/blockchain.rs | 8 +++++--- crates/common/types/block.rs | 11 ++++++++++- crates/networking/p2p/rlpx/l2/l2_connection.rs | 5 +---- crates/networking/p2p/sync.rs | 2 +- 4 files changed, 17 insertions(+), 9 deletions(-) diff --git a/crates/blockchain/blockchain.rs b/crates/blockchain/blockchain.rs index 82c7de4182c..a5ad8099068 100644 --- a/crates/blockchain/blockchain.rs +++ b/crates/blockchain/blockchain.rs @@ -952,10 +952,12 @@ pub fn validate_block( .map_err(InvalidBlockError::from)?; if chain_config.is_osaka_activated(block.header.timestamp) { - let block_size = std::mem::size_of_val(block) as u64; - if block_size > MAX_BLOCK_SIZE { + let mut buf: Vec = Vec::new(); + let block_rlp_size = + block.cached_body_rlp_encode.len() as u64 + block.cached_header_rlp_encode.len() as u64; + if block_rlp_size > MAX_BLOCK_SIZE { return Err(error::ChainError::InvalidBlock( - 
InvalidBlockError::MaximumSizeExceeded(MAX_BLOCK_SIZE, block_size), + InvalidBlockError::MaximumSizeExceeded(MAX_BLOCK_SIZE, block_rlp_size), )); } } else if chain_config.is_prague_activated(block.header.timestamp) { diff --git a/crates/common/types/block.rs b/crates/common/types/block.rs index a97d2fe4df2..f86ef1f4a4b 100644 --- a/crates/common/types/block.rs +++ b/crates/common/types/block.rs @@ -36,11 +36,20 @@ use once_cell::sync::OnceCell; pub struct Block { pub header: BlockHeader, pub body: BlockBody, + pub cached_body_rlp_encode: Vec, + pub cached_header_rlp_encode: Vec, } impl Block { pub fn new(header: BlockHeader, body: BlockBody) -> Block { - Block { header, body } + let body_rlp = body.encode_to_vec(); + let header_rlp = header.encode_to_vec(); + Block { + header, + body, + cached_body_rlp_encode: body_rlp, + cached_header_rlp_encode: header_rlp, + } } pub fn hash(&self) -> BlockHash { diff --git a/crates/networking/p2p/rlpx/l2/l2_connection.rs b/crates/networking/p2p/rlpx/l2/l2_connection.rs index 091974bf8a0..7649ccdb85d 100644 --- a/crates/networking/p2p/rlpx/l2/l2_connection.rs +++ b/crates/networking/p2p/rlpx/l2/l2_connection.rs @@ -202,10 +202,7 @@ pub(crate) async fn send_new_block(established: &mut Established) -> Result<(), "Block header not found after querying for the block number".to_owned(), ), )?; - let new_block = Block { - header: new_block_header, - body: new_block_body, - }; + let new_block = Block::new(new_block_header, new_block_body); let signature = match l2_state .store_rollup .get_signature_by_block(new_block.hash()) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 0487bad808e..90550e75637 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -545,7 +545,7 @@ impl FullBlockSyncState { .current_headers .drain(..bodies.len()) .zip(bodies) - .map(|(header, body)| Block { header, body }); + .map(|(header, body)| Block::new(header, body)); self.current_blocks.extend(blocks); 
} // Execute full blocks From c8b241acde6c0682632ca922da2111188c254cd9 Mon Sep 17 00:00:00 2001 From: SDartayet Date: Fri, 29 Aug 2025 17:58:31 -0300 Subject: [PATCH 17/55] Implemented size limit on block validation --- crates/blockchain/blockchain.rs | 11 ++++++----- crates/common/types/block.rs | 24 ++++++++++++++++++++++++ crates/l2/monitor/widget/blocks.rs | 2 +- crates/storage/store_db/libmdbx.rs | 4 ++-- 4 files changed, 33 insertions(+), 8 deletions(-) diff --git a/crates/blockchain/blockchain.rs b/crates/blockchain/blockchain.rs index a5ad8099068..7f10e9769ba 100644 --- a/crates/blockchain/blockchain.rs +++ b/crates/blockchain/blockchain.rs @@ -12,7 +12,9 @@ use bytes::Bytes; use constants::{MAX_INITCODE_SIZE, MAX_TRANSACTION_DATA_SIZE}; use error::MempoolError; use error::{ChainError, InvalidBlockError}; -use ethrex_common::constants::{GAS_PER_BLOB, MAX_BLOCK_SIZE, MIN_BASE_FEE_PER_BLOB_GAS}; +use ethrex_common::constants::{ + GAS_PER_BLOB, MAX_BLOCK_SIZE, MAX_RLP_BLOCK_SIZE, MIN_BASE_FEE_PER_BLOB_GAS, +}; use ethrex_common::types::block_execution_witness::ExecutionWitnessResult; use ethrex_common::types::requests::{EncodedRequests, Requests, compute_requests_hash}; use ethrex_common::types::{ @@ -25,6 +27,7 @@ use ethrex_common::types::{ELASTICITY_MULTIPLIER, P2PTransaction}; use ethrex_common::types::{Fork, MempoolTransaction}; use ethrex_common::{Address, H256, TrieLogger}; use ethrex_metrics::metrics; +use ethrex_rlp::encode::RLPEncode; use ethrex_storage::{ AccountUpdatesList, Store, UpdateBatch, error::StoreError, hash_address, hash_key, }; @@ -952,10 +955,8 @@ pub fn validate_block( .map_err(InvalidBlockError::from)?; if chain_config.is_osaka_activated(block.header.timestamp) { - let mut buf: Vec = Vec::new(); - let block_rlp_size = - block.cached_body_rlp_encode.len() as u64 + block.cached_header_rlp_encode.len() as u64; - if block_rlp_size > MAX_BLOCK_SIZE { + let block_rlp_size = block.get_rlp_encode_size(); + if block_rlp_size as u64 > 
MAX_RLP_BLOCK_SIZE { return Err(error::ChainError::InvalidBlock( InvalidBlockError::MaximumSizeExceeded(MAX_BLOCK_SIZE, block_rlp_size), )); diff --git a/crates/common/types/block.rs b/crates/common/types/block.rs index f86ef1f4a4b..df668416e2a 100644 --- a/crates/common/types/block.rs +++ b/crates/common/types/block.rs @@ -47,6 +47,9 @@ impl Block { Block { header, body, + + // Cache these to avoid doing repeat calculations, since we need the RLP encoded size when checking for the size limit + // We cache the RLP encodes of the body and the header rather than the entire block because these are the ones that are stored cached_body_rlp_encode: body_rlp, cached_header_rlp_encode: header_rlp, } @@ -55,6 +58,27 @@ impl Block { pub fn hash(&self) -> BlockHash { self.header.hash() } + + // Calculate the size of the RLP encode of the block + // We need the RLP encode in two places: in the block validation to check its size doesn't exceed the maximum, and in the store to save it + // However, the store uses the encode of the body and the header, whereas for the max size we need the size of the encode of the block as a whole + // So we cache the body and header, and calculate the size of the block encode based off of them + pub fn get_rlp_encode_size(&self) -> u64 { + let body_fields_rlp_size = if self.cached_body_rlp_encode[0] <= 0xf7 { + self.cached_body_rlp_encode.len() - 1 + } else { + self.cached_body_rlp_encode.len() - (self.cached_body_rlp_encode[0] as usize - 0xf7) - 1 + }; + let header_rlp_size = self.cached_header_rlp_encode.len(); + + let block_rlp_payload_length = header_rlp_size + body_fields_rlp_size; + if block_rlp_payload_length > 55 { + 1 + (block_rlp_payload_length as f64).log(256.0).ceil() as u64 + + block_rlp_payload_length as u64 + } else { + 1 + block_rlp_payload_length as u64 + } + } } impl RLPEncode for Block { diff --git a/crates/l2/monitor/widget/blocks.rs b/crates/l2/monitor/widget/blocks.rs index 455efdeef45..f6ed1c4c89a 100644 --- 
a/crates/l2/monitor/widget/blocks.rs +++ b/crates/l2/monitor/widget/blocks.rs @@ -113,7 +113,7 @@ impl BlocksTable { block.header.coinbase, block.header.gas_used, block.header.blob_gas_used, - block.encode_to_vec().len(), + block.get_rlp_encode_size(), ) }) .collect::>(); diff --git a/crates/storage/store_db/libmdbx.rs b/crates/storage/store_db/libmdbx.rs index 9a469aa6a09..417717c73c0 100644 --- a/crates/storage/store_db/libmdbx.rs +++ b/crates/storage/store_db/libmdbx.rs @@ -169,13 +169,13 @@ impl StoreEngine for Store { tx.upsert::( hash.into(), - BlockBodyRLP::from_bytes(block.body.encode_to_vec()), + BlockBodyRLP::from_bytes(block.cached_body_rlp_encode), ) .map_err(StoreError::LibmdbxError)?; tx.upsert::( hash.into(), - BlockHeaderRLP::from_bytes(block.header.encode_to_vec()), + BlockHeaderRLP::from_bytes(block.cached_header_rlp_encode), ) .map_err(StoreError::LibmdbxError)?; From c77b49178cad4cce0f48e096c9de80a5690c4d2c Mon Sep 17 00:00:00 2001 From: SDartayet Date: Fri, 29 Aug 2025 18:06:24 -0300 Subject: [PATCH 18/55] Small fixes --- crates/blockchain/blockchain.rs | 7 ++----- crates/l2/monitor/widget/blocks.rs | 3 +-- 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/crates/blockchain/blockchain.rs b/crates/blockchain/blockchain.rs index 7f10e9769ba..88e3c588d23 100644 --- a/crates/blockchain/blockchain.rs +++ b/crates/blockchain/blockchain.rs @@ -12,9 +12,7 @@ use bytes::Bytes; use constants::{MAX_INITCODE_SIZE, MAX_TRANSACTION_DATA_SIZE}; use error::MempoolError; use error::{ChainError, InvalidBlockError}; -use ethrex_common::constants::{ - GAS_PER_BLOB, MAX_BLOCK_SIZE, MAX_RLP_BLOCK_SIZE, MIN_BASE_FEE_PER_BLOB_GAS, -}; +use ethrex_common::constants::{GAS_PER_BLOB, MAX_RLP_BLOCK_SIZE, MIN_BASE_FEE_PER_BLOB_GAS}; use ethrex_common::types::block_execution_witness::ExecutionWitnessResult; use ethrex_common::types::requests::{EncodedRequests, Requests, compute_requests_hash}; use ethrex_common::types::{ @@ -27,7 +25,6 @@ use 
ethrex_common::types::{ELASTICITY_MULTIPLIER, P2PTransaction}; use ethrex_common::types::{Fork, MempoolTransaction}; use ethrex_common::{Address, H256, TrieLogger}; use ethrex_metrics::metrics; -use ethrex_rlp::encode::RLPEncode; use ethrex_storage::{ AccountUpdatesList, Store, UpdateBatch, error::StoreError, hash_address, hash_key, }; @@ -958,7 +955,7 @@ pub fn validate_block( let block_rlp_size = block.get_rlp_encode_size(); if block_rlp_size as u64 > MAX_RLP_BLOCK_SIZE { return Err(error::ChainError::InvalidBlock( - InvalidBlockError::MaximumSizeExceeded(MAX_BLOCK_SIZE, block_rlp_size), + InvalidBlockError::MaximumSizeExceeded(MAX_RLP_BLOCK_SIZE, block_rlp_size), )); } } else if chain_config.is_prague_activated(block.header.timestamp) { diff --git a/crates/l2/monitor/widget/blocks.rs b/crates/l2/monitor/widget/blocks.rs index f6ed1c4c89a..fa082b5ca29 100644 --- a/crates/l2/monitor/widget/blocks.rs +++ b/crates/l2/monitor/widget/blocks.rs @@ -1,7 +1,6 @@ use std::cmp::min; use ethrex_common::{Address, H256, types::Block}; -use ethrex_rlp::encode::RLPEncode; use ethrex_storage::Store; use ratatui::{ buffer::Buffer, @@ -113,7 +112,7 @@ impl BlocksTable { block.header.coinbase, block.header.gas_used, block.header.blob_gas_used, - block.get_rlp_encode_size(), + block.get_rlp_encode_size() as usize, ) }) .collect::>(); From b5c55417cef5e214a9a36eed2c225b22e2c8e511 Mon Sep 17 00:00:00 2001 From: SDartayet Date: Thu, 4 Sep 2025 10:59:40 -0300 Subject: [PATCH 19/55] Fixed linting errors --- crates/blockchain/blockchain.rs | 4 ++-- crates/common/types/block.rs | 8 ++++---- crates/l2/monitor/widget/blocks.rs | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/crates/blockchain/blockchain.rs b/crates/blockchain/blockchain.rs index 88e3c588d23..d0f90ca9f38 100644 --- a/crates/blockchain/blockchain.rs +++ b/crates/blockchain/blockchain.rs @@ -953,9 +953,9 @@ pub fn validate_block( if chain_config.is_osaka_activated(block.header.timestamp) { let block_rlp_size 
= block.get_rlp_encode_size(); - if block_rlp_size as u64 > MAX_RLP_BLOCK_SIZE { + if block_rlp_size > MAX_RLP_BLOCK_SIZE as usize { return Err(error::ChainError::InvalidBlock( - InvalidBlockError::MaximumSizeExceeded(MAX_RLP_BLOCK_SIZE, block_rlp_size), + InvalidBlockError::MaximumSizeExceeded(MAX_RLP_BLOCK_SIZE, block_rlp_size as u64), )); } } else if chain_config.is_prague_activated(block.header.timestamp) { diff --git a/crates/common/types/block.rs b/crates/common/types/block.rs index df668416e2a..79b4d29ce74 100644 --- a/crates/common/types/block.rs +++ b/crates/common/types/block.rs @@ -63,7 +63,7 @@ impl Block { // We need the RLP encode in two places: in the block validation to check its size doesn't exceed the maximum, and in the store to save it // However, the store uses the encode of the body and the header, whereas for the max size we need the size of the encode of the block as a whole // So we cache the body and header, and calculate the size of the block encode based off of them - pub fn get_rlp_encode_size(&self) -> u64 { + pub fn get_rlp_encode_size(&self) -> usize { let body_fields_rlp_size = if self.cached_body_rlp_encode[0] <= 0xf7 { self.cached_body_rlp_encode.len() - 1 } else { @@ -73,10 +73,10 @@ impl Block { let block_rlp_payload_length = header_rlp_size + body_fields_rlp_size; if block_rlp_payload_length > 55 { - 1 + (block_rlp_payload_length as f64).log(256.0).ceil() as u64 - + block_rlp_payload_length as u64 + 1 + (block_rlp_payload_length as f64).log(256.0).ceil() as usize + + block_rlp_payload_length } else { - 1 + block_rlp_payload_length as u64 + 1 + block_rlp_payload_length } } } diff --git a/crates/l2/monitor/widget/blocks.rs b/crates/l2/monitor/widget/blocks.rs index fa082b5ca29..4573270cab9 100644 --- a/crates/l2/monitor/widget/blocks.rs +++ b/crates/l2/monitor/widget/blocks.rs @@ -112,7 +112,7 @@ impl BlocksTable { block.header.coinbase, block.header.gas_used, block.header.blob_gas_used, - block.get_rlp_encode_size() as usize, + 
block.get_rlp_encode_size(), ) }) .collect::>(); From dadc3fcfba79e2790f37b2f63a1e4d785da7e76d Mon Sep 17 00:00:00 2001 From: SDartayet Date: Thu, 4 Sep 2025 11:07:30 -0300 Subject: [PATCH 20/55] Fixed small error --- crates/networking/p2p/sync.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 4d160fe0a65..10eeadca3c2 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -613,7 +613,7 @@ impl FullBlockSyncState { .current_headers .drain(..bodies.len()) .zip(bodies) - .map(|(header, body)| Block { header, body }); + .map(|(header, body)| Block::new(header, body)); self.current_blocks.extend(blocks); // } // Execute full blocks From 0cfbc2c0c1c04195e0ed5d74be696005b66a8124 Mon Sep 17 00:00:00 2001 From: SDartayet Date: Thu, 4 Sep 2025 15:54:53 -0300 Subject: [PATCH 21/55] Skipped block RLP cached elements from serde and rkyv --- crates/common/types/block.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/crates/common/types/block.rs b/crates/common/types/block.rs index 79b4d29ce74..373f943f1f0 100644 --- a/crates/common/types/block.rs +++ b/crates/common/types/block.rs @@ -20,7 +20,7 @@ use ethrex_rlp::{ use ethrex_trie::Trie; use keccak_hash::keccak; use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; -use rkyv::{Archive, Deserialize as RDeserialize, Serialize as RSerialize}; +use rkyv::{Archive, Deserialize as RDeserialize, Serialize as RSerialize, with::Skip}; use serde::{Deserialize, Serialize}; use std::cmp::{Ordering, max}; @@ -36,7 +36,11 @@ use once_cell::sync::OnceCell; pub struct Block { pub header: BlockHeader, pub body: BlockBody, + #[serde(skip)] + #[rkyv(with = Skip)] pub cached_body_rlp_encode: Vec, + #[serde(skip)] + #[rkyv(with = Skip)] pub cached_header_rlp_encode: Vec, } From 74b180fccdf862da43a5d2f757b506608d1193f9 Mon Sep 17 00:00:00 2001 From: SDartayet Date: Fri, 5 Sep 2025 11:21:59 -0300 Subject: 
[PATCH 22/55] Fix --- crates/storage/store_db/libmdbx.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/storage/store_db/libmdbx.rs b/crates/storage/store_db/libmdbx.rs index 3b803a8341d..ed6b362a660 100644 --- a/crates/storage/store_db/libmdbx.rs +++ b/crates/storage/store_db/libmdbx.rs @@ -169,13 +169,13 @@ impl StoreEngine for Store { tx.upsert::( hash.into(), - BlockBodyRLP::from_bytes(block.cached_body_rlp_encode), + BlockBodyRLP::from_bytes(block.body.encode_to_vec()), ) .map_err(StoreError::LibmdbxError)?; tx.upsert::( hash.into(), - BlockHeaderRLP::from_bytes(block.cached_header_rlp_encode), + BlockHeaderRLP::from_bytes(block.header.encode_to_vec()), ) .map_err(StoreError::LibmdbxError)?; From cc33e10abd9d7fc6a6da07f3fbae57866da2dd5b Mon Sep 17 00:00:00 2001 From: SDartayet Date: Fri, 5 Sep 2025 12:14:32 -0300 Subject: [PATCH 23/55] Fix --- crates/storage/store_db/libmdbx.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/crates/storage/store_db/libmdbx.rs b/crates/storage/store_db/libmdbx.rs index ed6b362a660..e8d773ad234 100644 --- a/crates/storage/store_db/libmdbx.rs +++ b/crates/storage/store_db/libmdbx.rs @@ -169,13 +169,13 @@ impl StoreEngine for Store { tx.upsert::( hash.into(), - BlockBodyRLP::from_bytes(block.body.encode_to_vec()), + BlockBodyRLP::from_bytes(block.cached_body_rlp_encode), ) .map_err(StoreError::LibmdbxError)?; tx.upsert::( hash.into(), - BlockHeaderRLP::from_bytes(block.header.encode_to_vec()), + BlockHeaderRLP::from_bytes(block.cached_header_rlp_encode), ) .map_err(StoreError::LibmdbxError)?; @@ -275,13 +275,13 @@ impl StoreEngine for Store { tx.upsert::( hash.into(), - BlockBodyRLP::from_bytes(block.body.encode_to_vec()), + BlockBodyRLP::from_bytes(block.cached_body_rlp_encode), ) .map_err(StoreError::LibmdbxError)?; tx.upsert::( hash.into(), - BlockHeaderRLP::from_bytes(block.header.encode_to_vec()), + BlockHeaderRLP::from_bytes(block.cached_header_rlp_encode), ) 
.map_err(StoreError::LibmdbxError)?; From e4aa61d524023c03b9a508019fe7732effe6853f Mon Sep 17 00:00:00 2001 From: SDartayet Date: Fri, 5 Sep 2025 15:26:22 -0300 Subject: [PATCH 24/55] Changed block initialization --- crates/common/types/block.rs | 31 +++++++++++++++++------------- crates/storage/store_db/libmdbx.rs | 12 ++++++++++-- 2 files changed, 28 insertions(+), 15 deletions(-) diff --git a/crates/common/types/block.rs b/crates/common/types/block.rs index 373f943f1f0..6e1ec384d9c 100644 --- a/crates/common/types/block.rs +++ b/crates/common/types/block.rs @@ -36,26 +36,24 @@ use once_cell::sync::OnceCell; pub struct Block { pub header: BlockHeader, pub body: BlockBody, + + // Cache these to avoid doing repeat calculations, since we need the RLP encoded size when checking for the size limit + // We cache the RLP encodes of the body and the header rather than the entire block because these are the ones that are stored #[serde(skip)] #[rkyv(with = Skip)] - pub cached_body_rlp_encode: Vec, + pub cached_body_rlp_encode: OnceCell>, #[serde(skip)] #[rkyv(with = Skip)] - pub cached_header_rlp_encode: Vec, + pub cached_header_rlp_encode: OnceCell>, } impl Block { pub fn new(header: BlockHeader, body: BlockBody) -> Block { - let body_rlp = body.encode_to_vec(); - let header_rlp = header.encode_to_vec(); Block { header, body, - - // Cache these to avoid doing repeat calculations, since we need the RLP encoded size when checking for the size limit - // We cache the RLP encodes of the body and the header rather than the entire block because these are the ones that are stored - cached_body_rlp_encode: body_rlp, - cached_header_rlp_encode: header_rlp, + cached_body_rlp_encode: OnceCell::new(), + cached_header_rlp_encode: OnceCell::new(), } } @@ -68,12 +66,19 @@ impl Block { // However, the store uses the encode of the body and the header, whereas for the max size we need the size of the encode of the block as a whole // So we cache the body and header, and calculate the size 
of the block encode based off of them pub fn get_rlp_encode_size(&self) -> usize { - let body_fields_rlp_size = if self.cached_body_rlp_encode[0] <= 0xf7 { - self.cached_body_rlp_encode.len() - 1 + let cached_body_rlp_encode = self + .cached_body_rlp_encode + .get_or_init(|| self.body.encode_to_vec()); + let cached_header_rlp_encode = self + .cached_header_rlp_encode + .get_or_init(|| self.header.encode_to_vec()); + + let body_fields_rlp_size = if cached_body_rlp_encode[0] <= 0xf7 { + cached_body_rlp_encode.len() - 1 } else { - self.cached_body_rlp_encode.len() - (self.cached_body_rlp_encode[0] as usize - 0xf7) - 1 + cached_body_rlp_encode.len() - (cached_body_rlp_encode[0] as usize - 0xf7) - 1 }; - let header_rlp_size = self.cached_header_rlp_encode.len(); + let header_rlp_size = cached_header_rlp_encode.len(); let block_rlp_payload_length = header_rlp_size + body_fields_rlp_size; if block_rlp_payload_length > 55 { diff --git a/crates/storage/store_db/libmdbx.rs b/crates/storage/store_db/libmdbx.rs index e8d773ad234..a6fff4067a0 100644 --- a/crates/storage/store_db/libmdbx.rs +++ b/crates/storage/store_db/libmdbx.rs @@ -169,13 +169,21 @@ impl StoreEngine for Store { tx.upsert::( hash.into(), - BlockBodyRLP::from_bytes(block.cached_body_rlp_encode), + BlockBodyRLP::from_bytes( + block + .cached_body_rlp_encode + .get_or_init(|| block.body.encode_to_vec()), + ), ) .map_err(StoreError::LibmdbxError)?; tx.upsert::( hash.into(), - BlockHeaderRLP::from_bytes(block.cached_header_rlp_encode), + BlockHeaderRLP::from_bytes( + block + .cached_header_rlp_encode + .get_or_init(|| block.header.encode_to_vec()), + ), ) .map_err(StoreError::LibmdbxError)?; From 5d891c52d0bc8f9964e348b08a2d5b892e4c510c Mon Sep 17 00:00:00 2001 From: SDartayet Date: Fri, 5 Sep 2025 15:30:52 -0300 Subject: [PATCH 25/55] Fix to previous commit --- crates/storage/store_db/libmdbx.rs | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/crates/storage/store_db/libmdbx.rs 
b/crates/storage/store_db/libmdbx.rs index a6fff4067a0..62173aca559 100644 --- a/crates/storage/store_db/libmdbx.rs +++ b/crates/storage/store_db/libmdbx.rs @@ -283,13 +283,21 @@ impl StoreEngine for Store { tx.upsert::( hash.into(), - BlockBodyRLP::from_bytes(block.cached_body_rlp_encode), + BlockBodyRLP::from_bytes( + block + .cached_body_rlp_encode + .get_or_init(|| block.body.encode_to_vec()), + ), ) .map_err(StoreError::LibmdbxError)?; tx.upsert::( hash.into(), - BlockHeaderRLP::from_bytes(block.cached_header_rlp_encode), + BlockHeaderRLP::from_bytes( + block + .cached_header_rlp_encode + .get_or_init(|| block.header.encode_to_vec()), + ), ) .map_err(StoreError::LibmdbxError)?; From 2a1b13be36014d7d5672929326c9e49ef984ad63 Mon Sep 17 00:00:00 2001 From: SDartayet Date: Fri, 5 Sep 2025 15:44:20 -0300 Subject: [PATCH 26/55] Fixing compile errors --- crates/storage/store_db/libmdbx.rs | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/crates/storage/store_db/libmdbx.rs b/crates/storage/store_db/libmdbx.rs index 62173aca559..e2c501818e8 100644 --- a/crates/storage/store_db/libmdbx.rs +++ b/crates/storage/store_db/libmdbx.rs @@ -154,7 +154,7 @@ impl StoreEngine for Store { .map_err(StoreError::LibmdbxError)?; } } - for block in update_batch.blocks { + for mut block in update_batch.blocks { // store block let number = block.header.number; let hash = block.hash(); @@ -172,7 +172,8 @@ impl StoreEngine for Store { BlockBodyRLP::from_bytes( block .cached_body_rlp_encode - .get_or_init(|| block.body.encode_to_vec()), + .take() + .unwrap_or(block.body.encode_to_vec()), ), ) .map_err(StoreError::LibmdbxError)?; @@ -182,7 +183,8 @@ impl StoreEngine for Store { BlockHeaderRLP::from_bytes( block .cached_header_rlp_encode - .get_or_init(|| block.header.encode_to_vec()), + .take() + .unwrap_or(block.header.encode_to_vec()), ), ) .map_err(StoreError::LibmdbxError)?; @@ -269,7 +271,7 @@ impl StoreEngine for Store { tokio::task::spawn_blocking(move 
|| { let tx = db.begin_readwrite().map_err(StoreError::LibmdbxError)?; - for block in blocks { + for mut block in blocks { let number = block.header.number; let hash = block.hash(); @@ -286,7 +288,8 @@ impl StoreEngine for Store { BlockBodyRLP::from_bytes( block .cached_body_rlp_encode - .get_or_init(|| block.body.encode_to_vec()), + .take() + .unwrap_or(block.body.encode_to_vec()), ), ) .map_err(StoreError::LibmdbxError)?; @@ -296,7 +299,8 @@ impl StoreEngine for Store { BlockHeaderRLP::from_bytes( block .cached_header_rlp_encode - .get_or_init(|| block.header.encode_to_vec()), + .take() + .unwrap_or(block.header.encode_to_vec()), ), ) .map_err(StoreError::LibmdbxError)?; From 74730251986c367145f1b4a0056adf100ea527cc Mon Sep 17 00:00:00 2001 From: SDartayet <44068466+SDartayet@users.noreply.github.com> Date: Fri, 5 Sep 2025 17:01:11 -0300 Subject: [PATCH 27/55] Update test_runner.rs --- tooling/ef_tests/blockchain/test_runner.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tooling/ef_tests/blockchain/test_runner.rs b/tooling/ef_tests/blockchain/test_runner.rs index cd490ded173..be3e9ca6067 100644 --- a/tooling/ef_tests/blockchain/test_runner.rs +++ b/tooling/ef_tests/blockchain/test_runner.rs @@ -35,7 +35,7 @@ pub fn parse_and_execute( //Test with the Fusaka tests that should pass. 
TODO: Once we've implemented all the Fusaka EIPs this should be removed //EIPs should be added as strings in the format 'eip-XXXX' let fusaka_eips_to_test: Vec<&str> = vec!["eip-7594", "eip-7883", "eip-7934", "eip-7939"]; - + //Hashes of any other tests to run, that don't correspond to an especific EIP (for examples, some integration tests) //We should really remove this once we're finished with implementing Fusaka, but it's a good-enough workaround to run specific tests for now let hashes_of_fusaka_tests_to_run: Vec<&str> = vec![ From 12d916d0da1061eb750b510def55b97b0efaf13c Mon Sep 17 00:00:00 2001 From: SDartayet <44068466+SDartayet@users.noreply.github.com> Date: Fri, 5 Sep 2025 17:19:07 -0300 Subject: [PATCH 28/55] Update test_runner.rs --- tooling/ef_tests/blockchain/test_runner.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tooling/ef_tests/blockchain/test_runner.rs b/tooling/ef_tests/blockchain/test_runner.rs index be3e9ca6067..aa22f918dc0 100644 --- a/tooling/ef_tests/blockchain/test_runner.rs +++ b/tooling/ef_tests/blockchain/test_runner.rs @@ -35,13 +35,12 @@ pub fn parse_and_execute( //Test with the Fusaka tests that should pass. 
TODO: Once we've implemented all the Fusaka EIPs this should be removed //EIPs should be added as strings in the format 'eip-XXXX' let fusaka_eips_to_test: Vec<&str> = vec!["eip-7594", "eip-7883", "eip-7934", "eip-7939"]; - //Hashes of any other tests to run, that don't correspond to an especific EIP (for examples, some integration tests) //We should really remove this once we're finished with implementing Fusaka, but it's a good-enough workaround to run specific tests for now let hashes_of_fusaka_tests_to_run: Vec<&str> = vec![ "0xf0672af9718013a1f396a9268e91e220ff09e7fa97480844e31da500f8ef291f", //All opcodes test ]; - + let mut failures = Vec::new(); for (test_key, test) in tests { From 3634bd38bf118d3b6432dfe792dd5a4d6194b49c Mon Sep 17 00:00:00 2001 From: SDartayet <44068466+SDartayet@users.noreply.github.com> Date: Fri, 5 Sep 2025 17:23:53 -0300 Subject: [PATCH 29/55] Update test_runner.rs --- tooling/ef_tests/blockchain/test_runner.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/tooling/ef_tests/blockchain/test_runner.rs b/tooling/ef_tests/blockchain/test_runner.rs index aa22f918dc0..6f660c429b3 100644 --- a/tooling/ef_tests/blockchain/test_runner.rs +++ b/tooling/ef_tests/blockchain/test_runner.rs @@ -40,7 +40,6 @@ pub fn parse_and_execute( let hashes_of_fusaka_tests_to_run: Vec<&str> = vec![ "0xf0672af9718013a1f396a9268e91e220ff09e7fa97480844e31da500f8ef291f", //All opcodes test ]; - let mut failures = Vec::new(); for (test_key, test) in tests { From ff21cc91f42664e4c7754895093da81e0fed62ef Mon Sep 17 00:00:00 2001 From: SDartayet Date: Mon, 8 Sep 2025 11:56:44 -0300 Subject: [PATCH 30/55] Added block size check to the payload module --- crates/blockchain/payload.rs | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/crates/blockchain/payload.rs b/crates/blockchain/payload.rs index 19ed847be15..e26a6e41d1a 100644 --- a/crates/blockchain/payload.rs +++ b/crates/blockchain/payload.rs @@ -8,12 +8,13 @@ use std::{ use 
ethrex_common::{ Address, Bloom, Bytes, H256, U256, - constants::{DEFAULT_OMMERS_HASH, DEFAULT_REQUESTS_HASH, GAS_PER_BLOB}, + constants::{DEFAULT_OMMERS_HASH, DEFAULT_REQUESTS_HASH, GAS_PER_BLOB, MAX_RLP_BLOCK_SIZE}, types::{ AccountUpdate, BlobsBundle, Block, BlockBody, BlockHash, BlockHeader, BlockNumber, - ChainConfig, MempoolTransaction, Receipt, Transaction, TxType, Withdrawal, bloom_from_logs, - calc_excess_blob_gas, calculate_base_fee_per_blob_gas, calculate_base_fee_per_gas, - compute_receipts_root, compute_transactions_root, compute_withdrawals_root, + ChainConfig, Fork, MempoolTransaction, Receipt, Transaction, TxType, Withdrawal, + bloom_from_logs, calc_excess_blob_gas, calculate_base_fee_per_blob_gas, + calculate_base_fee_per_gas, compute_receipts_root, compute_transactions_root, + compute_withdrawals_root, requests::{EncodedRequests, compute_requests_hash}, }, }; @@ -216,6 +217,7 @@ pub struct PayloadBuildContext { pub store: Store, pub vm: Evm, pub account_updates: Vec, + pub size: usize, } impl PayloadBuildContext { @@ -242,6 +244,8 @@ impl PayloadBuildContext { BlockchainType::L2 => Evm::new_for_l2(evm_engine, vm_db)?, }; + let size = payload.encode_to_vec().len(); + Ok(PayloadBuildContext { remaining_gas: payload.header.gas_limit, receipts: vec![], @@ -255,6 +259,7 @@ impl PayloadBuildContext { store: storage.clone(), vm, account_updates: Vec::new(), + size, }) } } @@ -520,6 +525,14 @@ impl Blockchain { continue; } + // Check whether the transaction wouldn't put the block above the size limit + // https://eips.ethereum.org/EIPS/eip-7934 + let tx_rlp_size = head_tx.encode_to_vec().len(); + if tx_rlp_size + context.size > MAX_RLP_BLOCK_SIZE as usize { + break; + } + context.size += tx_rlp_size; + // TODO: maybe fetch hash too when filtering mempool so we don't have to compute it here (we can do this in the same refactor as adding timestamp) let tx_hash = head_tx.tx.hash(); From 652dd943633c3b00454d46b6db4a8b3289365f88 Mon Sep 17 00:00:00 2001 From: 
SDartayet Date: Mon, 8 Sep 2025 11:57:43 -0300 Subject: [PATCH 31/55] Revert "Added block size check to the payload module" This reverts commit 50e783ce7420d7255f57a7b0e86ef033d59db3bd. --- crates/blockchain/payload.rs | 21 ++++----------------- 1 file changed, 4 insertions(+), 17 deletions(-) diff --git a/crates/blockchain/payload.rs b/crates/blockchain/payload.rs index e26a6e41d1a..19ed847be15 100644 --- a/crates/blockchain/payload.rs +++ b/crates/blockchain/payload.rs @@ -8,13 +8,12 @@ use std::{ use ethrex_common::{ Address, Bloom, Bytes, H256, U256, - constants::{DEFAULT_OMMERS_HASH, DEFAULT_REQUESTS_HASH, GAS_PER_BLOB, MAX_RLP_BLOCK_SIZE}, + constants::{DEFAULT_OMMERS_HASH, DEFAULT_REQUESTS_HASH, GAS_PER_BLOB}, types::{ AccountUpdate, BlobsBundle, Block, BlockBody, BlockHash, BlockHeader, BlockNumber, - ChainConfig, Fork, MempoolTransaction, Receipt, Transaction, TxType, Withdrawal, - bloom_from_logs, calc_excess_blob_gas, calculate_base_fee_per_blob_gas, - calculate_base_fee_per_gas, compute_receipts_root, compute_transactions_root, - compute_withdrawals_root, + ChainConfig, MempoolTransaction, Receipt, Transaction, TxType, Withdrawal, bloom_from_logs, + calc_excess_blob_gas, calculate_base_fee_per_blob_gas, calculate_base_fee_per_gas, + compute_receipts_root, compute_transactions_root, compute_withdrawals_root, requests::{EncodedRequests, compute_requests_hash}, }, }; @@ -217,7 +216,6 @@ pub struct PayloadBuildContext { pub store: Store, pub vm: Evm, pub account_updates: Vec, - pub size: usize, } impl PayloadBuildContext { @@ -244,8 +242,6 @@ impl PayloadBuildContext { BlockchainType::L2 => Evm::new_for_l2(evm_engine, vm_db)?, }; - let size = payload.encode_to_vec().len(); - Ok(PayloadBuildContext { remaining_gas: payload.header.gas_limit, receipts: vec![], @@ -259,7 +255,6 @@ impl PayloadBuildContext { store: storage.clone(), vm, account_updates: Vec::new(), - size, }) } } @@ -525,14 +520,6 @@ impl Blockchain { continue; } - // Check whether the 
transaction wouldn't put the block above the size limit - // https://eips.ethereum.org/EIPS/eip-7934 - let tx_rlp_size = head_tx.encode_to_vec().len(); - if tx_rlp_size + context.size > MAX_RLP_BLOCK_SIZE as usize { - break; - } - context.size += tx_rlp_size; - // TODO: maybe fetch hash too when filtering mempool so we don't have to compute it here (we can do this in the same refactor as adding timestamp) let tx_hash = head_tx.tx.hash(); From c7911f2acbdb631175cd4cf0701e3e84e5cf43c7 Mon Sep 17 00:00:00 2001 From: SDartayet Date: Tue, 9 Sep 2025 11:32:41 -0300 Subject: [PATCH 32/55] Removed RLP caching --- crates/blockchain/blockchain.rs | 3 +- crates/common/types/block.rs | 46 +------------------ crates/l2/monitor/widget/blocks.rs | 3 +- .../networking/p2p/rlpx/l2/l2_connection.rs | 5 +- crates/networking/p2p/sync.rs | 2 +- crates/storage/store_db/libmdbx.rs | 32 +++---------- 6 files changed, 17 insertions(+), 74 deletions(-) diff --git a/crates/blockchain/blockchain.rs b/crates/blockchain/blockchain.rs index addff0e9bd5..370b16034e5 100644 --- a/crates/blockchain/blockchain.rs +++ b/crates/blockchain/blockchain.rs @@ -24,6 +24,7 @@ use ethrex_common::types::{ELASTICITY_MULTIPLIER, P2PTransaction}; use ethrex_common::types::{Fork, MempoolTransaction}; use ethrex_common::{Address, H256, TrieLogger}; use ethrex_metrics::metrics; +use ethrex_rlp::encode::RLPEncode; use ethrex_storage::{ AccountUpdatesList, Store, UpdateBatch, error::StoreError, hash_address, hash_key, }; @@ -988,7 +989,7 @@ pub fn validate_block( .map_err(InvalidBlockError::from)?; if chain_config.is_osaka_activated(block.header.timestamp) { - let block_rlp_size = block.get_rlp_encode_size(); + let block_rlp_size = block.encode_to_vec().len(); if block_rlp_size > MAX_RLP_BLOCK_SIZE as usize { return Err(error::ChainError::InvalidBlock( InvalidBlockError::MaximumSizeExceeded(MAX_RLP_BLOCK_SIZE, block_rlp_size as u64), diff --git a/crates/common/types/block.rs b/crates/common/types/block.rs index 
6e1ec384d9c..a97d2fe4df2 100644 --- a/crates/common/types/block.rs +++ b/crates/common/types/block.rs @@ -20,7 +20,7 @@ use ethrex_rlp::{ use ethrex_trie::Trie; use keccak_hash::keccak; use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; -use rkyv::{Archive, Deserialize as RDeserialize, Serialize as RSerialize, with::Skip}; +use rkyv::{Archive, Deserialize as RDeserialize, Serialize as RSerialize}; use serde::{Deserialize, Serialize}; use std::cmp::{Ordering, max}; @@ -36,58 +36,16 @@ use once_cell::sync::OnceCell; pub struct Block { pub header: BlockHeader, pub body: BlockBody, - - // Cache these to avoid doing repeat calculations, since we need the RLP encoded size when checking for the size limit - // We cache the RLP encodes of the body and the header rather than the entire block because these are the ones that are stored - #[serde(skip)] - #[rkyv(with = Skip)] - pub cached_body_rlp_encode: OnceCell>, - #[serde(skip)] - #[rkyv(with = Skip)] - pub cached_header_rlp_encode: OnceCell>, } impl Block { pub fn new(header: BlockHeader, body: BlockBody) -> Block { - Block { - header, - body, - cached_body_rlp_encode: OnceCell::new(), - cached_header_rlp_encode: OnceCell::new(), - } + Block { header, body } } pub fn hash(&self) -> BlockHash { self.header.hash() } - - // Calculate the size of the RLP encode of the block - // We need the RLP encode in two places: in the block validation to check its size doesn't exceed the maximum, and in the store to save it - // However, the store uses the encode of the body and the header, whereas for the max size we need the size of the encode of the block as a whole - // So we cache the body and header, and calculate the size of the block encode based off of them - pub fn get_rlp_encode_size(&self) -> usize { - let cached_body_rlp_encode = self - .cached_body_rlp_encode - .get_or_init(|| self.body.encode_to_vec()); - let cached_header_rlp_encode = self - .cached_header_rlp_encode - .get_or_init(|| 
self.header.encode_to_vec()); - - let body_fields_rlp_size = if cached_body_rlp_encode[0] <= 0xf7 { - cached_body_rlp_encode.len() - 1 - } else { - cached_body_rlp_encode.len() - (cached_body_rlp_encode[0] as usize - 0xf7) - 1 - }; - let header_rlp_size = cached_header_rlp_encode.len(); - - let block_rlp_payload_length = header_rlp_size + body_fields_rlp_size; - if block_rlp_payload_length > 55 { - 1 + (block_rlp_payload_length as f64).log(256.0).ceil() as usize - + block_rlp_payload_length - } else { - 1 + block_rlp_payload_length - } - } } impl RLPEncode for Block { diff --git a/crates/l2/monitor/widget/blocks.rs b/crates/l2/monitor/widget/blocks.rs index 4573270cab9..455efdeef45 100644 --- a/crates/l2/monitor/widget/blocks.rs +++ b/crates/l2/monitor/widget/blocks.rs @@ -1,6 +1,7 @@ use std::cmp::min; use ethrex_common::{Address, H256, types::Block}; +use ethrex_rlp::encode::RLPEncode; use ethrex_storage::Store; use ratatui::{ buffer::Buffer, @@ -112,7 +113,7 @@ impl BlocksTable { block.header.coinbase, block.header.gas_used, block.header.blob_gas_used, - block.get_rlp_encode_size(), + block.encode_to_vec().len(), ) }) .collect::>(); diff --git a/crates/networking/p2p/rlpx/l2/l2_connection.rs b/crates/networking/p2p/rlpx/l2/l2_connection.rs index 7649ccdb85d..091974bf8a0 100644 --- a/crates/networking/p2p/rlpx/l2/l2_connection.rs +++ b/crates/networking/p2p/rlpx/l2/l2_connection.rs @@ -202,7 +202,10 @@ pub(crate) async fn send_new_block(established: &mut Established) -> Result<(), "Block header not found after querying for the block number".to_owned(), ), )?; - let new_block = Block::new(new_block_header, new_block_body); + let new_block = Block { + header: new_block_header, + body: new_block_body, + }; let signature = match l2_state .store_rollup .get_signature_by_block(new_block.hash()) diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index a326c715f8b..46f0573bde8 100644 --- a/crates/networking/p2p/sync.rs +++ 
b/crates/networking/p2p/sync.rs @@ -618,7 +618,7 @@ impl FullBlockSyncState { .current_headers .drain(..bodies.len()) .zip(bodies) - .map(|(header, body)| Block::new(header, body)); + .map(|(header, body)| Block { header, body }); self.current_blocks.extend(blocks); // } // Execute full blocks diff --git a/crates/storage/store_db/libmdbx.rs b/crates/storage/store_db/libmdbx.rs index c641092c4c4..cd2f9d4f9c8 100644 --- a/crates/storage/store_db/libmdbx.rs +++ b/crates/storage/store_db/libmdbx.rs @@ -154,7 +154,7 @@ impl StoreEngine for Store { .map_err(StoreError::LibmdbxError)?; } } - for mut block in update_batch.blocks { + for block in update_batch.blocks { // store block let number = block.header.number; let hash = block.hash(); @@ -169,23 +169,13 @@ impl StoreEngine for Store { tx.upsert::( hash.into(), - BlockBodyRLP::from_bytes( - block - .cached_body_rlp_encode - .take() - .unwrap_or(block.body.encode_to_vec()), - ), + BlockBodyRLP::from_bytes(block.body.encode_to_vec()), ) .map_err(StoreError::LibmdbxError)?; tx.upsert::( hash.into(), - BlockHeaderRLP::from_bytes( - block - .cached_header_rlp_encode - .take() - .unwrap_or(block.header.encode_to_vec()), - ), + BlockHeaderRLP::from_bytes(block.header.encode_to_vec()), ) .map_err(StoreError::LibmdbxError)?; @@ -271,7 +261,7 @@ impl StoreEngine for Store { tokio::task::spawn_blocking(move || { let tx = db.begin_readwrite().map_err(StoreError::LibmdbxError)?; - for mut block in blocks { + for block in blocks { let number = block.header.number; let hash = block.hash(); @@ -285,23 +275,13 @@ impl StoreEngine for Store { tx.upsert::( hash.into(), - BlockBodyRLP::from_bytes( - block - .cached_body_rlp_encode - .take() - .unwrap_or(block.body.encode_to_vec()), - ), + BlockBodyRLP::from_bytes(block.body.encode_to_vec()), ) .map_err(StoreError::LibmdbxError)?; tx.upsert::( hash.into(), - BlockHeaderRLP::from_bytes( - block - .cached_header_rlp_encode - .take() - .unwrap_or(block.header.encode_to_vec()), - ), + 
BlockHeaderRLP::from_bytes(block.header.encode_to_vec()), ) .map_err(StoreError::LibmdbxError)?; From 4c0c021fc7a304fa12975b72a3fad57bcda25969 Mon Sep 17 00:00:00 2001 From: SDartayet Date: Tue, 9 Sep 2025 12:47:19 -0300 Subject: [PATCH 33/55] Renamed error and constant --- crates/blockchain/error.rs | 2 +- crates/common/constants.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/blockchain/error.rs b/crates/blockchain/error.rs index 192d8060f67..882576840f4 100644 --- a/crates/blockchain/error.rs +++ b/crates/blockchain/error.rs @@ -68,7 +68,7 @@ pub enum InvalidBlockError { #[error("Invalid transaction: {0}")] InvalidTransaction(String), #[error("Maximum block size exceeded: Maximum is {0} MiB, but block was {1} MiB")] - MaximumSizeExceeded(u64, u64), + MaximumRlpSizeExceeded(u64, u64), } #[derive(Debug, thiserror::Error)] diff --git a/crates/common/constants.rs b/crates/common/constants.rs index 39e0b96c243..ed27cdbbec1 100644 --- a/crates/common/constants.rs +++ b/crates/common/constants.rs @@ -61,5 +61,5 @@ pub const MIN_BASE_FEE_PER_BLOB_GAS: u64 = 1; // === EIP-7934 constants === pub const MAX_BLOCK_SIZE: u64 = 10_485_760; -pub const SAFETY_MARGIN: u64 = 2_097_152; -pub const MAX_RLP_BLOCK_SIZE: u64 = MAX_BLOCK_SIZE - SAFETY_MARGIN; +pub const RLP_BLOCK_SIZE_SAFETY_MARGIN: u64 = 2_097_152; +pub const MAX_RLP_BLOCK_SIZE: u64 = MAX_BLOCK_SIZE - RLP_BLOCK_SIZE_SAFETY_MARGIN; From 30932654992b0ad88810561af8502a602c646f45 Mon Sep 17 00:00:00 2001 From: SDartayet Date: Tue, 9 Sep 2025 12:58:00 -0300 Subject: [PATCH 34/55] Adding file left out from previous commit --- crates/blockchain/blockchain.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/crates/blockchain/blockchain.rs b/crates/blockchain/blockchain.rs index 370b16034e5..6fc3656790d 100644 --- a/crates/blockchain/blockchain.rs +++ b/crates/blockchain/blockchain.rs @@ -992,7 +992,10 @@ pub fn validate_block( let block_rlp_size = 
block.encode_to_vec().len(); if block_rlp_size > MAX_RLP_BLOCK_SIZE as usize { return Err(error::ChainError::InvalidBlock( - InvalidBlockError::MaximumSizeExceeded(MAX_RLP_BLOCK_SIZE, block_rlp_size as u64), + InvalidBlockError::MaximumRlpSizeExceeded( + MAX_RLP_BLOCK_SIZE, + block_rlp_size as u64, + ), )); } } else if chain_config.is_prague_activated(block.header.timestamp) { From e6840cf83f85b2672fc28b2109b2a22eda025092 Mon Sep 17 00:00:00 2001 From: SDartayet Date: Tue, 9 Sep 2025 16:57:43 -0300 Subject: [PATCH 35/55] Initial size check implementation on building --- crates/blockchain/payload.rs | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/crates/blockchain/payload.rs b/crates/blockchain/payload.rs index bc1bcd4ff32..e41975885ba 100644 --- a/crates/blockchain/payload.rs +++ b/crates/blockchain/payload.rs @@ -8,7 +8,7 @@ use std::{ use ethrex_common::{ Address, Bloom, Bytes, H256, U256, - constants::{DEFAULT_OMMERS_HASH, DEFAULT_REQUESTS_HASH, GAS_PER_BLOB}, + constants::{DEFAULT_OMMERS_HASH, DEFAULT_REQUESTS_HASH, GAS_PER_BLOB, MAX_RLP_BLOCK_SIZE}, types::{ AccountUpdate, BlobsBundle, Block, BlockBody, BlockHash, BlockHeader, BlockNumber, ChainConfig, MempoolTransaction, Receipt, Transaction, TxType, Withdrawal, bloom_from_logs, @@ -216,6 +216,7 @@ pub struct PayloadBuildContext { pub store: Store, pub vm: Evm, pub account_updates: Vec, + pub payload_size: u64, } impl PayloadBuildContext { @@ -241,6 +242,7 @@ impl PayloadBuildContext { BlockchainType::L2 => Evm::new_for_l2(vm_db)?, }; + let payload_size = payload.encode_to_vec().len() as u64; Ok(PayloadBuildContext { remaining_gas: payload.header.gas_limit, receipts: vec![], @@ -254,6 +256,7 @@ impl PayloadBuildContext { store: storage.clone(), vm, account_updates: Vec::new(), + payload_size, }) } } @@ -518,6 +521,16 @@ impl Blockchain { continue; } + // Check adding a transaction wouldn't exceed the Osaka block size limit of 10 MiB + context.payload_size += 
head_tx.encode_canonical_to_vec().len() as u64; + if context + .chain_config()? + .is_osaka_activated(context.payload.header.timestamp) + && context.payload_size > MAX_RLP_BLOCK_SIZE + { + continue; + } + // TODO: maybe fetch hash too when filtering mempool so we don't have to compute it here (we can do this in the same refactor as adding timestamp) let tx_hash = head_tx.tx.hash(); From 92e7570594f21ebd20202dd768fd2e66e2a963fc Mon Sep 17 00:00:00 2001 From: Camila Di Ielsi Date: Thu, 16 Oct 2025 11:05:23 -0300 Subject: [PATCH 36/55] set new exception for testing --- tooling/ef_tests/blockchain/deserialize.rs | 3 +++ tooling/ef_tests/blockchain/test_runner.rs | 5 +++++ tooling/ef_tests/blockchain/types.rs | 1 + 3 files changed, 9 insertions(+) diff --git a/tooling/ef_tests/blockchain/deserialize.rs b/tooling/ef_tests/blockchain/deserialize.rs index 166adb4e40d..126752bab22 100644 --- a/tooling/ef_tests/blockchain/deserialize.rs +++ b/tooling/ef_tests/blockchain/deserialize.rs @@ -120,6 +120,9 @@ where BlockExpectedException::SystemContractCallFailed, ) } + "BlockException.RLP_BLOCK_LIMIT_EXCEEDED" => BlockChainExpectedException::BlockException( + BlockExpectedException::RlpBlockLimitExceeded, + ), _ => BlockChainExpectedException::Other, }) .collect(); diff --git a/tooling/ef_tests/blockchain/test_runner.rs b/tooling/ef_tests/blockchain/test_runner.rs index eaee7ba4337..3990b0d87c0 100644 --- a/tooling/ef_tests/blockchain/test_runner.rs +++ b/tooling/ef_tests/blockchain/test_runner.rs @@ -224,6 +224,11 @@ fn exception_is_expected( BlockExpectedException::SystemContractCallFailed ), ChainError::EvmError(EvmError::SystemContractCallFailed(_)) + ) | ( + BlockChainExpectedException::BlockException( + BlockExpectedException::RlpBlockLimitExceeded + ), + ChainError::InvalidBlock(InvalidBlockError::MaximumRlpSizeExceeded(_, _)) ) | ( BlockChainExpectedException::Other, _ //TODO: Decide whether to support more specific errors. 
diff --git a/tooling/ef_tests/blockchain/types.rs b/tooling/ef_tests/blockchain/types.rs index f12ecaa4d7b..f07a056d3e1 100644 --- a/tooling/ef_tests/blockchain/types.rs +++ b/tooling/ef_tests/blockchain/types.rs @@ -605,5 +605,6 @@ pub enum BlockExpectedException { IncorrectBlockFormat, InvalidRequest, SystemContractCallFailed, + RlpBlockLimitExceeded, Other, //TODO: Implement exceptions } From 7c9f02465e865134792ea0184245082430db4b03 Mon Sep 17 00:00:00 2001 From: Camila Di Ielsi Date: Thu, 16 Oct 2025 11:12:22 -0300 Subject: [PATCH 37/55] stop adding txs when block size cap is surpassed --- crates/blockchain/payload.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/crates/blockchain/payload.rs b/crates/blockchain/payload.rs index 0c0df6f7b39..3fce22f7ca6 100644 --- a/crates/blockchain/payload.rs +++ b/crates/blockchain/payload.rs @@ -531,14 +531,18 @@ impl Blockchain { } // Check adding a transaction wouldn't exceed the Osaka block size limit of 10 MiB - context.payload_size += head_tx.encode_canonical_to_vec().len() as u64; + // if inclusion of the transaction puts the block size over the size limit + // we don't add any more txs to the payload. + let potential_rlp_block_size = + context.payload_size + head_tx.encode_canonical_to_vec().len() as u64; if context .chain_config()? 
.is_osaka_activated(context.payload.header.timestamp) - && context.payload_size > MAX_RLP_BLOCK_SIZE + && potential_rlp_block_size > MAX_RLP_BLOCK_SIZE { - continue; + break; } + context.payload_size = potential_rlp_block_size; // TODO: maybe fetch hash too when filtering mempool so we don't have to compute it here (we can do this in the same refactor as adding timestamp) let tx_hash = head_tx.tx.hash(); From 591b3ce8c7d5544e0e756d57956aa533aeba342a Mon Sep 17 00:00:00 2001 From: Camila Di Ielsi Date: Wed, 22 Oct 2025 17:08:10 -0300 Subject: [PATCH 38/55] add endpoint addPeer --- Cargo.lock | 2 ++ Makefile | 2 +- cmd/ethrex/initializers.rs | 19 +++++++++++++ crates/networking/p2p/rlpx/initiator.rs | 7 +++++ crates/networking/rpc/Cargo.toml | 2 ++ crates/networking/rpc/admin/mod.rs | 2 +- crates/networking/rpc/admin/peers.rs | 38 ++++++++++++++++++++++++- crates/networking/rpc/rpc.rs | 5 ++++ crates/networking/rpc/utils.rs | 1 + 9 files changed, 75 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 839f32ae2da..5093e12e66f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3977,6 +3977,8 @@ dependencies = [ "serde_json", "sha2", "sha3", + "spawned-concurrency", + "spawned-rt", "thiserror 2.0.17", "tokio", "tokio-util", diff --git a/Makefile b/Makefile index 58c5dc04d02..b424f7f11a7 100644 --- a/Makefile +++ b/Makefile @@ -103,7 +103,7 @@ setup-hive: ## šŸ Set up Hive testing framework TEST_PATTERN ?= / SIM_LOG_LEVEL ?= 3 -SIM_PARALLELISM ?= 16 +SIM_PARALLELISM ?= 4 # Runs a Hive testing suite. A web interface showing the results is available at http://127.0.0.1:8080 via the `view-hive` target. # The endpoints tested can be filtered by supplying a test pattern in the form "/endpoint_1|endpoint_2|..|endpoint_n". 
diff --git a/cmd/ethrex/initializers.rs b/cmd/ethrex/initializers.rs index 96340aad0bd..1b5c7bc1aa2 100644 --- a/cmd/ethrex/initializers.rs +++ b/cmd/ethrex/initializers.rs @@ -143,6 +143,7 @@ pub async fn init_rpc_api( log_filter_handler: Option>, gas_ceil: Option, extra_data: String, + p2p_context: P2PContext, ) { init_datadir(&opts.datadir); @@ -184,6 +185,7 @@ pub async fn init_rpc_api( log_filter_handler, gas_ceil, extra_data, + p2p_context, ); tracker.spawn(rpc_api); @@ -453,6 +455,22 @@ pub async fn init_l1( let cancel_token = tokio_util::sync::CancellationToken::new(); + let p2p_context = P2PContext::new( + local_p2p_node.clone(), + local_node_record.clone(), + tracker.clone(), + signer, + peer_handler.peer_table.clone(), + store.clone(), + blockchain.clone(), + get_client_version(), + #[cfg(feature = "l2")] + based_context, + opts.tx_broadcasting_time_interval, + ) + .await + .expect("P2P context could not be created"); + init_rpc_api( &opts, peer_handler.clone(), @@ -466,6 +484,7 @@ pub async fn init_l1( // TODO (#4482): Make this configurable. 
None, opts.extra_data.clone(), + p2p_context.clone(), ) .await; diff --git a/crates/networking/p2p/rlpx/initiator.rs b/crates/networking/p2p/rlpx/initiator.rs index 94cb2b97027..74979dd3542 100644 --- a/crates/networking/p2p/rlpx/initiator.rs +++ b/crates/networking/p2p/rlpx/initiator.rs @@ -1,3 +1,4 @@ +use crate::types::Node; use crate::{ discv4::peer_table::PeerTableError, metrics::METRICS, network::P2PContext, rlpx::connection::server::PeerConnection, @@ -81,6 +82,7 @@ impl RLPxInitiator { #[derive(Debug, Clone)] pub enum InMessage { LookForPeers, + Initiate { node: Node }, Shutdown, } @@ -121,6 +123,11 @@ impl GenServer for RLPxInitiator { CastResponse::NoReply } + Self::CastMsg::Initiate { node } => { + PeerConnection::spawn_as_initiator(self.context.clone(), &node).await; + METRICS.record_new_rlpx_conn_attempt().await; + CastResponse::NoReply + } Self::CastMsg::Shutdown => CastResponse::Stop, } } diff --git a/crates/networking/rpc/Cargo.toml b/crates/networking/rpc/Cargo.toml index ef77fbe85e2..47bbdf1db96 100644 --- a/crates/networking/rpc/Cargo.toml +++ b/crates/networking/rpc/Cargo.toml @@ -35,6 +35,8 @@ reqwest.workspace = true sha3 = "0.10.8" sha2.workspace = true jemalloc_pprof = { version = "0.8.0", optional = true, features = ["flamegraph", "symbolize"] } +spawned-rt.workspace = true +spawned-concurrency.workspace = true # Clients envy = "0.4.2" diff --git a/crates/networking/rpc/admin/mod.rs b/crates/networking/rpc/admin/mod.rs index 28238aff6eb..5f01d82185b 100644 --- a/crates/networking/rpc/admin/mod.rs +++ b/crates/networking/rpc/admin/mod.rs @@ -10,7 +10,7 @@ use crate::{ utils::{RpcErr, RpcRequest}, }; mod peers; -pub use peers::peers; +pub use peers::{add_peers, peers}; #[derive(Serialize, Debug)] struct NodeInfo { diff --git a/crates/networking/rpc/admin/peers.rs b/crates/networking/rpc/admin/peers.rs index 3e74f0bfc21..e82ef2ed7ac 100644 --- a/crates/networking/rpc/admin/peers.rs +++ b/crates/networking/rpc/admin/peers.rs @@ -1,9 +1,18 @@ 
use crate::{rpc::RpcApiContext, utils::RpcErr}; use core::net::SocketAddr; use ethrex_common::H256; -use ethrex_p2p::{discv4::peer_table::PeerData, rlpx::p2p::Capability}; +use ethrex_p2p::{ + discv4::peer_table::PeerData, + network::P2PContext, + rlpx::{ + initiator::{InMessage, RLPxInitiator}, + p2p::Capability, + }, + types::Node, +}; use serde::Serialize; use serde_json::Value; +use spawned_concurrency::tasks::GenServer; /// Serializable peer data returned by the node's rpc #[derive(Serialize)] @@ -88,6 +97,33 @@ pub async fn peers(context: &mut RpcApiContext) -> Result { .collect::>(); Ok(serde_json::to_value(peers)?) } +use crate::utils::RpcRequest; +pub async fn add_peers(request: &RpcRequest, context: P2PContext) -> Result { + let params = request + .params + .clone() + .ok_or(RpcErr::MissingParam("enode url".to_string()))?; + + if params.len() != 1 { + return Err(RpcErr::BadParams("Expected 1 param".to_owned())); + }; + + let url = params + .first() + .ok_or(RpcErr::MissingParam("enode url".to_string()))? + .as_str() + .ok_or(RpcErr::WrongParam("Expected string".to_string()))?; + + let node = Node::from_enode_url(url).map_err(|error| RpcErr::BadParams(error.to_string()))?; + + let state = RLPxInitiator::new(context); + let mut server = RLPxInitiator::start_on_thread(state.clone()); + + match server.cast(InMessage::Initiate { node }).await { + Err(_) => Ok(serde_json::to_value(false)?), + Ok(_) => Ok(serde_json::to_value(true)?), + } +} // TODO: Adapt the test to the new P2P architecture. 
#[cfg(test)] diff --git a/crates/networking/rpc/rpc.rs b/crates/networking/rpc/rpc.rs index 18666d22fc2..b00e73bea2b 100644 --- a/crates/networking/rpc/rpc.rs +++ b/crates/networking/rpc/rpc.rs @@ -54,6 +54,7 @@ use axum_extra::{ use bytes::Bytes; use ethrex_blockchain::Blockchain; use ethrex_common::types::DEFAULT_BUILDER_GAS_CEIL; +use ethrex_p2p::network::P2PContext; use ethrex_p2p::peer_handler::PeerHandler; use ethrex_p2p::sync_manager::SyncManager; use ethrex_p2p::types::Node; @@ -165,6 +166,7 @@ pub struct RpcApiContext { pub gas_tip_estimator: Arc>, pub log_filter_handler: Option>, pub gas_ceil: u64, + pub p2p_context: P2PContext, } #[derive(Debug, Clone)] @@ -212,6 +214,7 @@ pub async fn start_api( log_filter_handler: Option>, gas_ceil: Option, extra_data: String, + p2p_cxt: P2PContext, ) -> Result<(), RpcErr> { // TODO: Refactor how filters are handled, // filters are used by the filters endpoints (eth_newFilter, eth_getFilterChanges, ...etc) @@ -232,6 +235,7 @@ pub async fn start_api( gas_tip_estimator: Arc::new(TokioMutex::new(GasTipEstimator::new())), log_filter_handler, gas_ceil: gas_ceil.unwrap_or(DEFAULT_BUILDER_GAS_CEIL), + p2p_context: p2p_cxt, }; // Periodically clean up the active filters for the filters endpoints. 
@@ -531,6 +535,7 @@ pub async fn map_admin_requests( "admin_nodeInfo" => admin::node_info(context.storage, &context.node_data), "admin_peers" => admin::peers(&mut context).await, "admin_setLogLevel" => admin::set_log_level(req, &context.log_filter_handler).await, + "admin_addPeer" => admin::add_peers(req, context.p2p_context).await, unknown_admin_method => Err(RpcErr::MethodNotFound(unknown_admin_method.to_owned())), } } diff --git a/crates/networking/rpc/utils.rs b/crates/networking/rpc/utils.rs index 9bf286f8ce7..1851f58a21e 100644 --- a/crates/networking/rpc/utils.rs +++ b/crates/networking/rpc/utils.rs @@ -425,6 +425,7 @@ pub mod test_utils { gas_tip_estimator: Arc::new(TokioMutex::new(GasTipEstimator::new())), log_filter_handler: None, gas_ceil: DEFAULT_BUILDER_GAS_CEIL, + p2p_context, } } } From 036b751ef4db105121313a63d6b5104af1e86195 Mon Sep 17 00:00:00 2001 From: Camila Di Ielsi Date: Wed, 22 Oct 2025 18:37:25 -0300 Subject: [PATCH 39/55] have p2p_context as a parameter for init_network --- cmd/ethrex/initializers.rs | 32 +++++--------------------------- 1 file changed, 5 insertions(+), 27 deletions(-) diff --git a/cmd/ethrex/initializers.rs b/cmd/ethrex/initializers.rs index 1b5c7bc1aa2..0a7a9ca866c 100644 --- a/cmd/ethrex/initializers.rs +++ b/cmd/ethrex/initializers.rs @@ -196,14 +196,11 @@ pub async fn init_network( opts: &Options, network: &Network, datadir: &Path, - local_p2p_node: Node, - local_node_record: Arc>, - signer: SecretKey, peer_handler: PeerHandler, - store: Store, tracker: TaskTracker, blockchain: Arc, #[cfg(feature = "l2")] based_context: Option, + context: P2PContext, ) { if opts.dev { error!("Binary wasn't built with The feature flag `dev` enabled."); @@ -214,22 +211,6 @@ pub async fn init_network( let bootnodes = get_bootnodes(opts, network, datadir); - let context = P2PContext::new( - local_p2p_node, - local_node_record, - tracker.clone(), - signer, - peer_handler.peer_table.clone(), - store, - blockchain.clone(), - 
get_client_version(), - #[cfg(feature = "l2")] - based_context, - opts.tx_broadcasting_time_interval, - ) - .await - .expect("P2P context could not be created"); - ethrex_p2p::start_network(context, bootnodes) .await .expect("Network starts"); @@ -472,9 +453,9 @@ pub async fn init_l1( .expect("P2P context could not be created"); init_rpc_api( - &opts, + &opts.clone(), peer_handler.clone(), - local_p2p_node.clone(), + local_p2p_node, local_node_record.lock().await.clone(), store.clone(), blockchain.clone(), @@ -483,7 +464,7 @@ pub async fn init_l1( log_filter_handler, // TODO (#4482): Make this configurable. None, - opts.extra_data.clone(), + opts.clone().extra_data, p2p_context.clone(), ) .await; @@ -500,15 +481,12 @@ pub async fn init_l1( &opts, &network, datadir, - local_p2p_node, - local_node_record.clone(), - signer, peer_handler.clone(), - store.clone(), tracker.clone(), blockchain.clone(), #[cfg(feature = "l2")] None, + p2p_context, ) .await; } else { From 5e660a8601a7b82be9fdeef68267d31a8b6556ea Mon Sep 17 00:00:00 2001 From: Camila Di Ielsi Date: Thu, 23 Oct 2025 19:30:11 -0300 Subject: [PATCH 40/55] pass genserver instead of P2P context as a parameter --- Cargo.lock | 2 + cmd/ethrex/Cargo.toml | 3 ++ cmd/ethrex/initializers.rs | 17 +++---- cmd/ethrex/l2/initializers.rs | 65 +++++++++++++++---------- crates/networking/p2p/network.rs | 41 +++++++++++++++- crates/networking/p2p/peer_handler.rs | 15 ++++-- crates/networking/p2p/rlpx/initiator.rs | 13 ++++- crates/networking/p2p/sync.rs | 4 +- crates/networking/p2p/sync_manager.rs | 4 +- crates/networking/rpc/admin/peers.rs | 11 ++--- crates/networking/rpc/rpc.rs | 6 +-- crates/networking/rpc/utils.rs | 9 ++-- 12 files changed, 127 insertions(+), 63 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5093e12e66f..802db2c5dc5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3542,6 +3542,8 @@ dependencies = [ "secp256k1", "serde", "serde_json", + "spawned-concurrency", + "spawned-rt", "thiserror 2.0.17", 
"tikv-jemallocator", "tokio", diff --git a/cmd/ethrex/Cargo.toml b/cmd/ethrex/Cargo.toml index e91f6dc5304..9e6dcb70da3 100644 --- a/cmd/ethrex/Cargo.toml +++ b/cmd/ethrex/Cargo.toml @@ -52,6 +52,9 @@ thiserror.workspace = true itertools = "0.14.0" url.workspace = true +spawned-rt.workspace = true +spawned-concurrency.workspace = true + # L2 external dependencies tui-logger = { workspace = true, optional = true } diff --git a/cmd/ethrex/initializers.rs b/cmd/ethrex/initializers.rs index 0a7a9ca866c..cf411e64579 100644 --- a/cmd/ethrex/initializers.rs +++ b/cmd/ethrex/initializers.rs @@ -11,8 +11,7 @@ use ethrex_common::types::Genesis; use ethrex_config::networks::Network; use ethrex_metrics::profiling::{FunctionProfilingLayer, initialize_block_processing_profile}; -#[cfg(feature = "l2")] -use ethrex_p2p::rlpx::l2::l2_connection::P2PBasedContext; +use ethrex_p2p::rlpx::initiator::RLPxInitiator; use ethrex_p2p::{ discv4::peer_table::PeerTable, network::P2PContext, @@ -143,7 +142,6 @@ pub async fn init_rpc_api( log_filter_handler: Option>, gas_ceil: Option, extra_data: String, - p2p_context: P2PContext, ) { init_datadir(&opts.datadir); @@ -185,7 +183,6 @@ pub async fn init_rpc_api( log_filter_handler, gas_ceil, extra_data, - p2p_context, ); tracker.spawn(rpc_api); @@ -199,7 +196,6 @@ pub async fn init_network( peer_handler: PeerHandler, tracker: TaskTracker, blockchain: Arc, - #[cfg(feature = "l2")] based_context: Option, context: P2PContext, ) { if opts.dev { @@ -429,7 +425,7 @@ pub async fn init_l1( &signer, ))); - let peer_handler = PeerHandler::new(PeerTable::spawn(opts.target_peers)); + let peer_table = PeerTable::spawn(opts.target_peers); // TODO: Check every module starts properly. 
let tracker = TaskTracker::new(); @@ -441,7 +437,7 @@ pub async fn init_l1( local_node_record.clone(), tracker.clone(), signer, - peer_handler.peer_table.clone(), + peer_table.clone(), store.clone(), blockchain.clone(), get_client_version(), @@ -452,6 +448,10 @@ pub async fn init_l1( .await .expect("P2P context could not be created"); + let initiator = RLPxInitiator::spawn(p2p_context.clone()).await; + + let peer_handler = PeerHandler::new(peer_table.clone(), initiator); + init_rpc_api( &opts.clone(), peer_handler.clone(), @@ -465,7 +465,6 @@ pub async fn init_l1( // TODO (#4482): Make this configurable. None, opts.clone().extra_data, - p2p_context.clone(), ) .await; @@ -484,8 +483,6 @@ pub async fn init_l1( peer_handler.clone(), tracker.clone(), blockchain.clone(), - #[cfg(feature = "l2")] - None, p2p_context, ) .await; diff --git a/cmd/ethrex/l2/initializers.rs b/cmd/ethrex/l2/initializers.rs index 1e44039ce83..64b13b31a24 100644 --- a/cmd/ethrex/l2/initializers.rs +++ b/cmd/ethrex/l2/initializers.rs @@ -11,6 +11,8 @@ use ethrex_blockchain::{Blockchain, BlockchainType}; use ethrex_common::types::fee_config::FeeConfig; use ethrex_common::{Address, types::DEFAULT_BUILDER_GAS_CEIL}; use ethrex_l2::SequencerConfig; +use ethrex_p2p::network::P2PContext; +use ethrex_p2p::rlpx::initiator::RLPxInitiator; use ethrex_p2p::{ discv4::peer_table::PeerTable, peer_handler::PeerHandler, @@ -32,7 +34,7 @@ use tui_logger::{LevelFilter, TuiTracingSubscriberLayer}; async fn init_rpc_api( opts: &L1Options, l2_opts: &L2Options, - peer_table: PeerTable, + peer_handler: PeerHandler, local_p2p_node: Node, local_node_record: NodeRecord, store: Store, @@ -43,8 +45,6 @@ async fn init_rpc_api( log_filter_handler: Option>, gas_ceil: Option, ) { - let peer_handler = PeerHandler::new(peer_table); - init_datadir(&opts.datadir); // Create SyncManager @@ -181,18 +181,52 @@ pub async fn init_l2( &signer, ))); - let peer_handler = PeerHandler::new(PeerTable::spawn(opts.node_opts.target_peers)); + 
let peer_table = PeerTable::spawn(opts.node_opts.target_peers); // TODO: Check every module starts properly. let tracker = TaskTracker::new(); let mut join_set = JoinSet::new(); + let p2p_context = P2PContext::new( + local_p2p_node.clone(), + local_node_record.clone(), + tracker.clone(), + signer, + peer_table.clone(), + store.clone(), + blockchain.clone(), + get_client_version(), + #[cfg(feature = "l2")] + Some(P2PBasedContext { + store_rollup: rollup_store.clone(), + // TODO: The Web3Signer refactor introduced a limitation where the committer key cannot be accessed directly because the signer could be either Local or Remote. + // The Signer enum cannot be used in the P2PBasedContext struct due to cyclic dependencies between the l2-rpc and p2p crates. + // As a temporary solution, a dummy committer key is used until a proper mechanism to utilize the Signer enum is implemented. + // This should be replaced with the Signer enum once the refactor is complete. + committer_key: Arc::new( + SecretKey::from_slice( + &hex::decode( + "385c546456b6a603a1cfcaa9ec9494ba4832da08dd6bcf4de9a71e4a01b74924", + ) + .expect("Invalid committer key"), + ) + .expect("Failed to create committer key"), + ), + }), + opts.node_opts.tx_broadcasting_time_interval, + ) + .await + .expect("P2P context could not be created"); + + let initiator = RLPxInitiator::spawn(p2p_context.clone()).await; + let peer_handler = PeerHandler::new(PeerTable::spawn(opts.node_opts.target_peers), initiator); + let cancel_token = tokio_util::sync::CancellationToken::new(); init_rpc_api( &opts.node_opts, &opts, - peer_handler.peer_table.clone(), + peer_handler.clone(), local_p2p_node.clone(), local_node_record.lock().await.clone(), store.clone(), @@ -224,29 +258,10 @@ pub async fn init_l2( &opts.node_opts, &network, &datadir, - local_p2p_node, - local_node_record.clone(), - signer, peer_handler.clone(), - store.clone(), tracker, blockchain.clone(), - Some(P2PBasedContext { - store_rollup: rollup_store.clone(), - // 
TODO: The Web3Signer refactor introduced a limitation where the committer key cannot be accessed directly because the signer could be either Local or Remote. - // The Signer enum cannot be used in the P2PBasedContext struct due to cyclic dependencies between the l2-rpc and p2p crates. - // As a temporary solution, a dummy committer key is used until a proper mechanism to utilize the Signer enum is implemented. - // This should be replaced with the Signer enum once the refactor is complete. - committer_key: Arc::new( - SecretKey::from_slice( - &hex::decode( - "385c546456b6a603a1cfcaa9ec9494ba4832da08dd6bcf4de9a71e4a01b74924", - ) - .expect("Invalid committer key"), - ) - .expect("Failed to create committer key"), - ), - }), + p2p_context, ) .await; } else { diff --git a/crates/networking/p2p/network.rs b/crates/networking/p2p/network.rs index 67843eea006..560c0bd012b 100644 --- a/crates/networking/p2p/network.rs +++ b/crates/networking/p2p/network.rs @@ -8,7 +8,7 @@ use crate::{ metrics::METRICS, rlpx::{ connection::server::{PeerConnBroadcastSender, PeerConnection}, - initiator::RLPxInitiator, + //initiator::RLPxInitiator, message::Message, p2p::SUPPORTED_SNAP_CAPABILITIES, }, @@ -94,6 +94,43 @@ impl P2PContext { tx_broadcaster, }) } + pub async fn dummy(peer_table: PeerTable) -> P2PContext { + use ethrex_blockchain::BlockchainOptions; + use ethrex_storage::EngineType; + + let blockchain_opts = BlockchainOptions::default(); + let storage = Store::new("./temp", EngineType::InMemory).expect("Failed to create Store"); + let blockchain: Arc = + Arc::new(Blockchain::new(storage.clone(), blockchain_opts)); + let local_node = Node::from_enode_url( + "enode://d860a01f9722d78051619d1e2351aba3f43f943f6f00718d1b9baa4101932a1f5011f16bb2b1bb35db20d6fe28fa0bf09636d26a87d31de9ec6203eeedb1f666@18.138.108.67:30303", + ).expect("Bad enode url"); + let signer = SecretKey::from_slice(&[ + 16, 125, 177, 238, 167, 212, 168, 215, 239, 165, 77, 224, 199, 143, 55, 205, 9, 194, + 87, 139, 
92, 46, 30, 191, 74, 37, 68, 242, 38, 225, 104, 246, + ]) + .expect("Bad secret key"); + let (channel_broadcast_send_end, _) = + tokio::sync::broadcast::channel::<(tokio::task::Id, Arc)>(100000); + P2PContext { + tracker: TaskTracker::default(), + signer: SecretKey::from_byte_array(&[0xcd; 32]).expect("32 bytes, within curve order"), + table: peer_table.clone(), + storage, + blockchain: blockchain.clone(), + broadcast: channel_broadcast_send_end, + local_node: local_node.clone(), + local_node_record: Arc::new(Mutex::new( + NodeRecord::from_node(&local_node, 1, &signer).expect("Bad Node Record"), + )), + client_version: "".to_string(), + #[cfg(feature = "l2")] + based_context: None, + tx_broadcaster: TxBroadcaster::spawn(peer_table.clone(), blockchain, 1000) + .await + .expect("Failed to spawn tx broadcaster"), + } + } } #[derive(Debug, thiserror::Error)] @@ -123,7 +160,7 @@ pub async fn start_network(context: P2PContext, bootnodes: Vec) -> Result< error!("Failed to start discovery server: {e}"); })?; - RLPxInitiator::spawn(context.clone()).await; + //RLPxInitiator::spawn(context.clone()).await; context.tracker.spawn(serve_p2p_requests(context.clone())); diff --git a/crates/networking/p2p/peer_handler.rs b/crates/networking/p2p/peer_handler.rs index a06d1db806d..559ee63ad7e 100644 --- a/crates/networking/p2p/peer_handler.rs +++ b/crates/networking/p2p/peer_handler.rs @@ -1,3 +1,4 @@ +use crate::rlpx::initiator::RLPxInitiator; use crate::{ discv4::peer_table::{PeerData, PeerTable, PeerTableError, TARGET_PEERS}, metrics::{CurrentStepValue, METRICS}, @@ -34,6 +35,7 @@ use ethrex_rlp::encode::RLPEncode; use ethrex_storage::Store; use ethrex_trie::Nibbles; use ethrex_trie::{Node, verify_range}; +use spawned_concurrency::tasks::GenServerHandle; use std::{ collections::{BTreeMap, HashMap, HashSet, VecDeque}, io::ErrorKind, @@ -68,6 +70,7 @@ pub const MAX_BLOCK_BODIES_TO_REQUEST: usize = 128; #[derive(Debug, Clone)] pub struct PeerHandler { pub peer_table: PeerTable, + pub 
initiator: GenServerHandle, } pub enum BlockRequestOrder { @@ -142,14 +145,18 @@ async fn ask_peer_head_number( } impl PeerHandler { - pub fn new(peer_table: PeerTable) -> PeerHandler { - Self { peer_table } + pub fn new(peer_table: PeerTable, initiator: GenServerHandle) -> PeerHandler { + Self { + peer_table, + initiator, + } } /// Creates a dummy PeerHandler for tests where interacting with peers is not needed /// This should only be used in tests as it won't be able to interact with the node's connected peers - pub fn dummy() -> PeerHandler { - PeerHandler::new(PeerTable::spawn(TARGET_PEERS)) + pub async fn dummy() -> PeerHandler { + let peer_table = PeerTable::spawn(TARGET_PEERS); + PeerHandler::new(peer_table.clone(), RLPxInitiator::dummy(peer_table).await) } async fn make_request( diff --git a/crates/networking/p2p/rlpx/initiator.rs b/crates/networking/p2p/rlpx/initiator.rs index 74979dd3542..d1ddda4338d 100644 --- a/crates/networking/p2p/rlpx/initiator.rs +++ b/crates/networking/p2p/rlpx/initiator.rs @@ -1,6 +1,8 @@ use crate::types::Node; use crate::{ - discv4::peer_table::PeerTableError, metrics::METRICS, network::P2PContext, + discv4::peer_table::{PeerTable, PeerTableError}, + metrics::METRICS, + network::P2PContext, rlpx::connection::server::PeerConnection, }; use spawned_concurrency::{ @@ -42,11 +44,12 @@ impl RLPxInitiator { } } - pub async fn spawn(context: P2PContext) { + pub async fn spawn(context: P2PContext) -> GenServerHandle { info!("Starting RLPx Initiator"); let state = RLPxInitiator::new(context); let mut server = RLPxInitiator::start_on_thread(state.clone()); let _ = server.cast(InMessage::LookForPeers).await; + server } async fn look_for_peers(&mut self) -> Result<(), RLPxInitiatorError> { @@ -77,6 +80,12 @@ impl RLPxInitiator { self.lookup_interval } } + + pub async fn dummy(peer_table: PeerTable) -> GenServerHandle { + info!("Starting RLPx Initiator"); + let state = RLPxInitiator::new(P2PContext::dummy(peer_table).await); + 
RLPxInitiator::start_on_thread(state.clone()) + } } #[derive(Debug, Clone)] diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index f850a914238..a82cfb219ac 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -121,10 +121,10 @@ impl Syncer { /// Creates a dummy Syncer for tests where syncing is not needed /// This should only be used in tests as it won't be able to connect to the p2p network - pub fn dummy() -> Self { + pub async fn dummy() -> Self { Self { snap_enabled: Arc::new(AtomicBool::new(false)), - peers: PeerHandler::dummy(), + peers: PeerHandler::dummy().await, // This won't be used cancel_token: CancellationToken::new(), blockchain: Arc::new(Blockchain::default_with_store( diff --git a/crates/networking/p2p/sync_manager.rs b/crates/networking/p2p/sync_manager.rs index 6ed52054853..ad0660547d7 100644 --- a/crates/networking/p2p/sync_manager.rs +++ b/crates/networking/p2p/sync_manager.rs @@ -69,10 +69,10 @@ impl SyncManager { /// Creates a dummy SyncManager for tests where syncing is not needed /// This should only be used in tests as it won't be able to connect to the p2p network - pub fn dummy() -> Self { + pub async fn dummy() -> Self { Self { snap_enabled: Arc::new(AtomicBool::new(false)), - syncer: Arc::new(Mutex::new(Syncer::dummy())), + syncer: Arc::new(Mutex::new(Syncer::dummy().await)), last_fcu_head: Arc::new(Mutex::new(H256::zero())), store: Store::new("temp.db", ethrex_storage::EngineType::InMemory) .expect("Failed to start Storage Engine"), diff --git a/crates/networking/rpc/admin/peers.rs b/crates/networking/rpc/admin/peers.rs index e82ef2ed7ac..42d5a53bd4c 100644 --- a/crates/networking/rpc/admin/peers.rs +++ b/crates/networking/rpc/admin/peers.rs @@ -3,7 +3,6 @@ use core::net::SocketAddr; use ethrex_common::H256; use ethrex_p2p::{ discv4::peer_table::PeerData, - network::P2PContext, rlpx::{ initiator::{InMessage, RLPxInitiator}, p2p::Capability, @@ -12,7 +11,7 @@ use ethrex_p2p::{ }; 
use serde::Serialize; use serde_json::Value; -use spawned_concurrency::tasks::GenServer; +use spawned_concurrency::tasks::GenServerHandle; /// Serializable peer data returned by the node's rpc #[derive(Serialize)] @@ -98,7 +97,10 @@ pub async fn peers(context: &mut RpcApiContext) -> Result { Ok(serde_json::to_value(peers)?) } use crate::utils::RpcRequest; -pub async fn add_peers(request: &RpcRequest, context: P2PContext) -> Result { +pub async fn add_peers( + request: &RpcRequest, + mut server: GenServerHandle, +) -> Result { let params = request .params .clone() @@ -116,9 +118,6 @@ pub async fn add_peers(request: &RpcRequest, context: P2PContext) -> Result Ok(serde_json::to_value(false)?), Ok(_) => Ok(serde_json::to_value(true)?), diff --git a/crates/networking/rpc/rpc.rs b/crates/networking/rpc/rpc.rs index b00e73bea2b..adeecfce4d6 100644 --- a/crates/networking/rpc/rpc.rs +++ b/crates/networking/rpc/rpc.rs @@ -54,7 +54,6 @@ use axum_extra::{ use bytes::Bytes; use ethrex_blockchain::Blockchain; use ethrex_common::types::DEFAULT_BUILDER_GAS_CEIL; -use ethrex_p2p::network::P2PContext; use ethrex_p2p::peer_handler::PeerHandler; use ethrex_p2p::sync_manager::SyncManager; use ethrex_p2p::types::Node; @@ -166,7 +165,6 @@ pub struct RpcApiContext { pub gas_tip_estimator: Arc>, pub log_filter_handler: Option>, pub gas_ceil: u64, - pub p2p_context: P2PContext, } #[derive(Debug, Clone)] @@ -214,7 +212,6 @@ pub async fn start_api( log_filter_handler: Option>, gas_ceil: Option, extra_data: String, - p2p_cxt: P2PContext, ) -> Result<(), RpcErr> { // TODO: Refactor how filters are handled, // filters are used by the filters endpoints (eth_newFilter, eth_getFilterChanges, ...etc) @@ -235,7 +232,6 @@ pub async fn start_api( gas_tip_estimator: Arc::new(TokioMutex::new(GasTipEstimator::new())), log_filter_handler, gas_ceil: gas_ceil.unwrap_or(DEFAULT_BUILDER_GAS_CEIL), - p2p_context: p2p_cxt, }; // Periodically clean up the active filters for the filters endpoints. 
@@ -535,7 +531,7 @@ pub async fn map_admin_requests( "admin_nodeInfo" => admin::node_info(context.storage, &context.node_data), "admin_peers" => admin::peers(&mut context).await, "admin_setLogLevel" => admin::set_log_level(req, &context.log_filter_handler).await, - "admin_addPeer" => admin::add_peers(req, context.p2p_context).await, + "admin_addPeer" => admin::add_peers(req, context.peer_handler.initiator).await, unknown_admin_method => Err(RpcErr::MethodNotFound(unknown_admin_method.to_owned())), } } diff --git a/crates/networking/rpc/utils.rs b/crates/networking/rpc/utils.rs index 1851f58a21e..563878c078e 100644 --- a/crates/networking/rpc/utils.rs +++ b/crates/networking/rpc/utils.rs @@ -394,8 +394,8 @@ pub mod test_utils { jwt_secret, local_p2p_node, local_node_record, - SyncManager::dummy(), - PeerHandler::dummy(), + SyncManager::dummy().await, + PeerHandler::dummy().await, "ethrex/test".to_string(), None, None, @@ -413,8 +413,8 @@ pub mod test_utils { storage, blockchain, active_filters: Default::default(), - syncer: Arc::new(SyncManager::dummy()), - peer_handler: PeerHandler::dummy(), + syncer: Arc::new(SyncManager::dummy().await), + peer_handler: PeerHandler::dummy().await, node_data: NodeData { jwt_secret: Default::default(), local_p2p_node: example_p2p_node(), @@ -425,7 +425,6 @@ pub mod test_utils { gas_tip_estimator: Arc::new(TokioMutex::new(GasTipEstimator::new())), log_filter_handler: None, gas_ceil: DEFAULT_BUILDER_GAS_CEIL, - p2p_context, } } } From f814ee0dab415ff7807a3ec7a698eb65bea4228f Mon Sep 17 00:00:00 2001 From: cdiielsi <49721261+cdiielsi@users.noreply.github.com> Date: Mon, 27 Oct 2025 10:45:04 -0300 Subject: [PATCH 41/55] use default_with_store to create blockchain Co-authored-by: ElFantasma --- crates/networking/p2p/network.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/crates/networking/p2p/network.rs b/crates/networking/p2p/network.rs index 560c0bd012b..cbe02bcf0a5 100644 --- a/crates/networking/p2p/network.rs 
+++ b/crates/networking/p2p/network.rs @@ -98,10 +98,8 @@ impl P2PContext { use ethrex_blockchain::BlockchainOptions; use ethrex_storage::EngineType; - let blockchain_opts = BlockchainOptions::default(); let storage = Store::new("./temp", EngineType::InMemory).expect("Failed to create Store"); - let blockchain: Arc = - Arc::new(Blockchain::new(storage.clone(), blockchain_opts)); + let blockchain: Arc = Arc::new(Blockchain::default_with_store(storage.clone())); let local_node = Node::from_enode_url( "enode://d860a01f9722d78051619d1e2351aba3f43f943f6f00718d1b9baa4101932a1f5011f16bb2b1bb35db20d6fe28fa0bf09636d26a87d31de9ec6203eeedb1f666@18.138.108.67:30303", ).expect("Bad enode url"); From c7e29bda8c90d7e9e1df70e431137065cab59c8e Mon Sep 17 00:00:00 2001 From: cdiielsi <49721261+cdiielsi@users.noreply.github.com> Date: Mon, 27 Oct 2025 10:46:54 -0300 Subject: [PATCH 42/55] reduce code by setting signer randomly Co-authored-by: ElFantasma --- crates/networking/p2p/network.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/crates/networking/p2p/network.rs b/crates/networking/p2p/network.rs index cbe02bcf0a5..bf629431da7 100644 --- a/crates/networking/p2p/network.rs +++ b/crates/networking/p2p/network.rs @@ -103,11 +103,7 @@ impl P2PContext { let local_node = Node::from_enode_url( "enode://d860a01f9722d78051619d1e2351aba3f43f943f6f00718d1b9baa4101932a1f5011f16bb2b1bb35db20d6fe28fa0bf09636d26a87d31de9ec6203eeedb1f666@18.138.108.67:30303", ).expect("Bad enode url"); - let signer = SecretKey::from_slice(&[ - 16, 125, 177, 238, 167, 212, 168, 215, 239, 165, 77, 224, 199, 143, 55, 205, 9, 194, - 87, 139, 92, 46, 30, 191, 74, 37, 68, 242, 38, 225, 104, 246, - ]) - .expect("Bad secret key"); + let signer = SecretKey::new(&mut rand::rngs::OsRng); let (channel_broadcast_send_end, _) = tokio::sync::broadcast::channel::<(tokio::task::Id, Arc)>(100000); P2PContext { From 12baa8891d51e675e7b2bac11a060c74f2a82149 Mon Sep 17 00:00:00 2001 From: Camila Di Ielsi 
Date: Mon, 27 Oct 2025 10:51:13 -0300 Subject: [PATCH 43/55] drop unused import --- crates/networking/p2p/network.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/crates/networking/p2p/network.rs b/crates/networking/p2p/network.rs index bf629431da7..14ee3fca3be 100644 --- a/crates/networking/p2p/network.rs +++ b/crates/networking/p2p/network.rs @@ -95,7 +95,6 @@ impl P2PContext { }) } pub async fn dummy(peer_table: PeerTable) -> P2PContext { - use ethrex_blockchain::BlockchainOptions; use ethrex_storage::EngineType; let storage = Store::new("./temp", EngineType::InMemory).expect("Failed to create Store"); From 8add4d8e2859569787979cbe5fb8a250dd3483df Mon Sep 17 00:00:00 2001 From: Camila Di Ielsi Date: Mon, 27 Oct 2025 11:00:45 -0300 Subject: [PATCH 44/55] fix clippy --- cmd/ethrex/initializers.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/ethrex/initializers.rs b/cmd/ethrex/initializers.rs index cf411e64579..deee122d447 100644 --- a/cmd/ethrex/initializers.rs +++ b/cmd/ethrex/initializers.rs @@ -442,7 +442,7 @@ pub async fn init_l1( blockchain.clone(), get_client_version(), #[cfg(feature = "l2")] - based_context, + None, opts.tx_broadcasting_time_interval, ) .await From 34595656ac7a5634788f7470cc89fca233c458b3 Mon Sep 17 00:00:00 2001 From: Camila Di Ielsi Date: Mon, 27 Oct 2025 12:03:25 -0300 Subject: [PATCH 45/55] drop commented code --- crates/networking/p2p/network.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/crates/networking/p2p/network.rs b/crates/networking/p2p/network.rs index 14ee3fca3be..254181c8e57 100644 --- a/crates/networking/p2p/network.rs +++ b/crates/networking/p2p/network.rs @@ -8,7 +8,6 @@ use crate::{ metrics::METRICS, rlpx::{ connection::server::{PeerConnBroadcastSender, PeerConnection}, - //initiator::RLPxInitiator, message::Message, p2p::SUPPORTED_SNAP_CAPABILITIES, }, @@ -153,8 +152,6 @@ pub async fn start_network(context: P2PContext, bootnodes: Vec) -> Result< error!("Failed to start discovery 
server: {e}"); })?; - //RLPxInitiator::spawn(context.clone()).await; - context.tracker.spawn(serve_p2p_requests(context.clone())); Ok(()) From d95b72ec300511595bdc31ee94a602505f1f57b0 Mon Sep 17 00:00:00 2001 From: Camila Di Ielsi Date: Tue, 28 Oct 2025 11:27:02 -0300 Subject: [PATCH 46/55] add_peer returns true if the conection was made --- crates/networking/rpc/admin/peers.rs | 37 ++++++++++++++++++---------- crates/networking/rpc/rpc.rs | 2 +- 2 files changed, 25 insertions(+), 14 deletions(-) diff --git a/crates/networking/rpc/admin/peers.rs b/crates/networking/rpc/admin/peers.rs index 42d5a53bd4c..e68bda1b0ec 100644 --- a/crates/networking/rpc/admin/peers.rs +++ b/crates/networking/rpc/admin/peers.rs @@ -1,18 +1,14 @@ +use crate::utils::RpcRequest; use crate::{rpc::RpcApiContext, utils::RpcErr}; use core::net::SocketAddr; use ethrex_common::H256; use ethrex_p2p::{ discv4::peer_table::PeerData, - rlpx::{ - initiator::{InMessage, RLPxInitiator}, - p2p::Capability, - }, + rlpx::{initiator::InMessage, p2p::Capability}, types::Node, }; use serde::Serialize; use serde_json::Value; -use spawned_concurrency::tasks::GenServerHandle; - /// Serializable peer data returned by the node's rpc #[derive(Serialize)] pub struct RpcPeer { @@ -96,11 +92,9 @@ pub async fn peers(context: &mut RpcApiContext) -> Result { .collect::>(); Ok(serde_json::to_value(peers)?) 
} -use crate::utils::RpcRequest; -pub async fn add_peers( - request: &RpcRequest, - mut server: GenServerHandle, -) -> Result { + +pub async fn add_peers(context: &mut RpcApiContext, request: &RpcRequest) -> Result { + let mut server = context.peer_handler.initiator.clone(); let params = request .params .clone() @@ -118,9 +112,26 @@ pub async fn add_peers( let node = Node::from_enode_url(url).map_err(|error| RpcErr::BadParams(error.to_string()))?; - match server.cast(InMessage::Initiate { node }).await { + match server + .cast(InMessage::Initiate { node: node.clone() }) + .await + { Err(_) => Ok(serde_json::to_value(false)?), - Ok(_) => Ok(serde_json::to_value(true)?), + Ok(_) => { + if context + .peer_handler + .read_connected_peers() + .await + .into_iter() + .map(|peer| peer.node.enode_url()) + .collect::>() + .contains(&node.enode_url()) + { + Ok(serde_json::to_value(true)?) + } else { + Ok(serde_json::to_value(false)?) + } + } } } diff --git a/crates/networking/rpc/rpc.rs b/crates/networking/rpc/rpc.rs index 917cf834a86..e49983167c2 100644 --- a/crates/networking/rpc/rpc.rs +++ b/crates/networking/rpc/rpc.rs @@ -530,7 +530,7 @@ pub async fn map_admin_requests( "admin_nodeInfo" => admin::node_info(context.storage, &context.node_data), "admin_peers" => admin::peers(&mut context).await, "admin_setLogLevel" => admin::set_log_level(req, &context.log_filter_handler).await, - "admin_addPeer" => admin::add_peers(req, context.peer_handler.initiator).await, + "admin_addPeer" => admin::add_peers(&mut context, req).await, unknown_admin_method => Err(RpcErr::MethodNotFound(unknown_admin_method.to_owned())), } } From a79fb9a237d1c591b4a08153b0bb7125a2e393ba Mon Sep 17 00:00:00 2001 From: Camila Di Ielsi Date: Wed, 29 Oct 2025 10:51:10 -0300 Subject: [PATCH 47/55] include extra logs for testing --- crates/networking/rpc/admin/peers.rs | 30 +++++++++++++++------------- tooling/sync/Makefile | 4 ++-- 2 files changed, 18 insertions(+), 16 deletions(-) diff --git 
a/crates/networking/rpc/admin/peers.rs b/crates/networking/rpc/admin/peers.rs index e68bda1b0ec..b6779564488 100644 --- a/crates/networking/rpc/admin/peers.rs +++ b/crates/networking/rpc/admin/peers.rs @@ -9,6 +9,7 @@ use ethrex_p2p::{ }; use serde::Serialize; use serde_json::Value; +use tracing::info; /// Serializable peer data returned by the node's rpc #[derive(Serialize)] pub struct RpcPeer { @@ -116,21 +117,22 @@ pub async fn add_peers(context: &mut RpcApiContext, request: &RpcRequest) -> Res .cast(InMessage::Initiate { node: node.clone() }) .await { - Err(_) => Ok(serde_json::to_value(false)?), + Err(_) => { + info!("ACA salió mal"); + Ok(serde_json::to_value(false)?) + } Ok(_) => { - if context - .peer_handler - .read_connected_peers() - .await - .into_iter() - .map(|peer| peer.node.enode_url()) - .collect::>() - .contains(&node.enode_url()) - { - Ok(serde_json::to_value(true)?) - } else { - Ok(serde_json::to_value(false)?) - } + info!("ACA salió bien"); + Ok(serde_json::to_value( + context + .peer_handler + .read_connected_peers() + .await + .into_iter() + .map(|peer| peer.node.enode_url()) + .collect::>() + .contains(&node.enode_url()), + )?) } } } diff --git a/tooling/sync/Makefile b/tooling/sync/Makefile index 329ea0cc056..b32672bd60d 100644 --- a/tooling/sync/Makefile +++ b/tooling/sync/Makefile @@ -172,7 +172,7 @@ start-lighthouse: ## Start lighthouse for the network given by NETWORK. --disable-deposit-contract-sync --port $(LIGHTHOUSE_PORT) --discovery-port $(LIGHTHOUSE_DISCOVERY_PORT) start-ethrex: ## Start ethrex for the network given by NETWORK. - cd $(ETHREX_DIR) && RUST_LOG=3 cargo run --release --features "rocksdb sync-test metrics" --bin ethrex -- \ + cd $(ETHREX_DIR) && RUST_LOG=info,ethrex_p2p=debug cargo run --release --features "rocksdb sync-test metrics" --bin ethrex -- \ --http.addr 0.0.0.0 \ --http.port 8545 \ --authrpc.port 8551 \ @@ -182,7 +182,7 @@ start-ethrex: ## Start ethrex for the network given by NETWORK. 
--metrics.port 3701 \ --network $(NETWORK) \ --datadir "$(DATA_PATH)/${NETWORK}_data/ethrex/$(EVM)" \ - --authrpc.jwtsecret $(DATA_PATH)/${NETWORK}_data/jwt.hex \ + --authrpc.jwtsecret "$(DATA_PATH)/${NETWORK}_data/jwt.hex" \ $(BOOTNODES_FLAG) \ SERVER_SYNC_BRANCH ?= main From e51ca4fc54f6b60a0bf98cd0858e12f195758082 Mon Sep 17 00:00:00 2001 From: Camila Di Ielsi Date: Wed, 29 Oct 2025 11:49:08 -0300 Subject: [PATCH 48/55] restore log level --- tooling/sync/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tooling/sync/Makefile b/tooling/sync/Makefile index b32672bd60d..8d7980d952a 100644 --- a/tooling/sync/Makefile +++ b/tooling/sync/Makefile @@ -172,7 +172,7 @@ start-lighthouse: ## Start lighthouse for the network given by NETWORK. --disable-deposit-contract-sync --port $(LIGHTHOUSE_PORT) --discovery-port $(LIGHTHOUSE_DISCOVERY_PORT) start-ethrex: ## Start ethrex for the network given by NETWORK. - cd $(ETHREX_DIR) && RUST_LOG=info,ethrex_p2p=debug cargo run --release --features "rocksdb sync-test metrics" --bin ethrex -- \ + cd $(ETHREX_DIR) && RUST_LOG=3 cargo run --release --features "rocksdb sync-test metrics" --bin ethrex -- \ --http.addr 0.0.0.0 \ --http.port 8545 \ --authrpc.port 8551 \ From ce6526a4bd045a0af8ff24c347cedd50084aa496 Mon Sep 17 00:00:00 2001 From: Camila Di Ielsi Date: Fri, 31 Oct 2025 15:27:27 -0300 Subject: [PATCH 49/55] add sleep and timeout to make sure connection is made if possible --- crates/networking/rpc/admin/mod.rs | 2 +- crates/networking/rpc/admin/peers.rs | 57 +++++++++++++++++----------- crates/networking/rpc/rpc.rs | 2 +- 3 files changed, 36 insertions(+), 25 deletions(-) diff --git a/crates/networking/rpc/admin/mod.rs b/crates/networking/rpc/admin/mod.rs index 9499b081156..408a0fee43c 100644 --- a/crates/networking/rpc/admin/mod.rs +++ b/crates/networking/rpc/admin/mod.rs @@ -10,7 +10,7 @@ use crate::{ utils::{RpcErr, RpcRequest}, }; mod peers; -pub use 
peers::{add_peer, peers}; #[derive(Serialize, Debug)] struct NodeInfo { diff --git a/crates/networking/rpc/admin/peers.rs b/crates/networking/rpc/admin/peers.rs index b6779564488..7b1266d72dc 100644 --- a/crates/networking/rpc/admin/peers.rs +++ b/crates/networking/rpc/admin/peers.rs @@ -4,12 +4,14 @@ use core::net::SocketAddr; use ethrex_common::H256; use ethrex_p2p::{ discv4::peer_table::PeerData, + peer_handler::PeerHandler, rlpx::{initiator::InMessage, p2p::Capability}, types::Node, }; use serde::Serialize; use serde_json::Value; -use tracing::info; +use tokio::time::{Duration, Instant}; + /// Serializable peer data returned by the node's rpc #[derive(Serialize)] pub struct RpcPeer { @@ -94,8 +96,7 @@ pub async fn peers(context: &mut RpcApiContext) -> Result { Ok(serde_json::to_value(peers)?) } -pub async fn add_peers(context: &mut RpcApiContext, request: &RpcRequest) -> Result { - let mut server = context.peer_handler.initiator.clone(); +fn parse(request: &RpcRequest) -> Result { let params = request .params .clone() @@ -111,32 +112,42 @@ pub async fn add_peers(context: &mut RpcApiContext, request: &RpcRequest) -> Res .as_str() .ok_or(RpcErr::WrongParam("Expected string".to_string()))?; - let node = Node::from_enode_url(url).map_err(|error| RpcErr::BadParams(error.to_string()))?; + Node::from_enode_url(url).map_err(|error| RpcErr::BadParams(error.to_string())) +} - match server - .cast(InMessage::Initiate { node: node.clone() }) - .await - { - Err(_) => { - info!("ACA salió mal"); - Ok(serde_json::to_value(false)?) 
+pub async fn add_peer(context: &mut RpcApiContext, request: &RpcRequest) -> Result { + let mut server = context.peer_handler.initiator.clone(); + let node = parse(request)?; + + let start = Instant::now(); + let runtime = Duration::from_secs(10); + + loop { + let cast_result = server + .cast(InMessage::Initiate { node: node.clone() }) + .await; + + if peer_is_conected(&mut context.peer_handler, &node.enode_url()).await { + return Ok(serde_json::to_value(true)?); } - Ok(_) => { - info!("ACA salió bien"); - Ok(serde_json::to_value( - context - .peer_handler - .read_connected_peers() - .await - .into_iter() - .map(|peer| peer.node.enode_url()) - .collect::>() - .contains(&node.enode_url()), - )?) + + if matches!(cast_result, Err(_)) || start.elapsed() >= runtime { + return Ok(serde_json::to_value(false)?); } + std::thread::sleep(Duration::from_millis(100)); } } +async fn peer_is_conected(peer_handler: &mut PeerHandler, enode_url: &String) -> bool { + peer_handler + .read_connected_peers() + .await + .into_iter() + .map(|peer| peer.node.enode_url()) + .collect::>() + .contains(enode_url) +} + // TODO: Adapt the test to the new P2P architecture. 
#[cfg(test)] mod tests { diff --git a/crates/networking/rpc/rpc.rs b/crates/networking/rpc/rpc.rs index df901960f47..8ae787d19d5 100644 --- a/crates/networking/rpc/rpc.rs +++ b/crates/networking/rpc/rpc.rs @@ -555,7 +555,7 @@ pub async fn map_admin_requests( "admin_nodeInfo" => admin::node_info(context.storage, &context.node_data), "admin_peers" => admin::peers(&mut context).await, "admin_setLogLevel" => admin::set_log_level(req, &context.log_filter_handler).await, - "admin_addPeer" => admin::add_peers(&mut context, req).await, + "admin_addPeer" => admin::add_peer(&mut context, req).await, unknown_admin_method => Err(RpcErr::MethodNotFound(unknown_admin_method.to_owned())), } } From 760003031b3395ef6339b26a2697d16919018869 Mon Sep 17 00:00:00 2001 From: Camila Di Ielsi Date: Fri, 31 Oct 2025 17:33:35 -0300 Subject: [PATCH 50/55] restore hive parallelism --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index b424f7f11a7..58c5dc04d02 100644 --- a/Makefile +++ b/Makefile @@ -103,7 +103,7 @@ setup-hive: ## šŸ Set up Hive testing framework TEST_PATTERN ?= / SIM_LOG_LEVEL ?= 3 -SIM_PARALLELISM ?= 4 +SIM_PARALLELISM ?= 16 # Runs a Hive testing suite. A web interface showing the results is available at http://127.0.0.1:8080 via the `view-hive` target. # The endpoints tested can be filtered by supplying a test pattern in the form "/endpoint_1|endpoint_2|..|endpoint_n". 
From 96fc4ef867467ee80e86830a2d56fe1d49817149 Mon Sep 17 00:00:00 2001 From: Camila Di Ielsi Date: Fri, 31 Oct 2025 19:03:11 -0300 Subject: [PATCH 51/55] add comment on loop --- crates/networking/rpc/admin/peers.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crates/networking/rpc/admin/peers.rs b/crates/networking/rpc/admin/peers.rs index 179b92d9230..0dd4fed1146 100644 --- a/crates/networking/rpc/admin/peers.rs +++ b/crates/networking/rpc/admin/peers.rs @@ -122,6 +122,8 @@ pub async fn add_peer(context: &mut RpcApiContext, request: &RpcRequest) -> Resu let start = Instant::now(); let runtime = Duration::from_secs(10); + // This loop is necessary because connections are asynchronous, so to check if the connection with the peer was actually + // established we need to wait. loop { let cast_result = server .cast(InMessage::Initiate { node: node.clone() }) From e541fe057fda55a968bef80297ad1096ae17d742 Mon Sep 17 00:00:00 2001 From: Camila Di Ielsi Date: Mon, 3 Nov 2025 18:38:48 -0300 Subject: [PATCH 52/55] put dummies under the test flag and set parallelism to 8 --- .github/workflows/pr-main_l1.yaml | 2 +- crates/networking/p2p/Cargo.toml | 1 + crates/networking/p2p/peer_handler.rs | 5 ++++- crates/networking/p2p/rlpx/initiator.rs | 6 +++++- crates/networking/p2p/sync.rs | 5 ++++- crates/networking/p2p/sync_manager.rs | 1 + crates/networking/rpc/Cargo.toml | 2 +- 7 files changed, 17 insertions(+), 5 deletions(-) diff --git a/.github/workflows/pr-main_l1.yaml b/.github/workflows/pr-main_l1.yaml index 19abf558d84..e86bab776cf 100644 --- a/.github/workflows/pr-main_l1.yaml +++ b/.github/workflows/pr-main_l1.yaml @@ -241,7 +241,7 @@ jobs: SIM_LIMIT: ${{ matrix.limit }} SIM_BUILDARG: ${{ matrix.buildarg }} run: | - FLAGS='--sim.parallelism 4 --sim.loglevel 3' + FLAGS='--sim.parallelism 8 --sim.loglevel 3' if [[ -n "$SIM_LIMIT" ]]; then escaped_limit=${SIM_LIMIT//\'/\'\\\'\'} FLAGS+=" --sim.limit '$escaped_limit'" diff --git a/crates/networking/p2p/Cargo.toml 
b/crates/networking/p2p/Cargo.toml index ce819dc570d..f321678fff8 100644 --- a/crates/networking/p2p/Cargo.toml +++ b/crates/networking/p2p/Cargo.toml @@ -62,6 +62,7 @@ default = ["c-kzg"] c-kzg = ["ethrex-blockchain/c-kzg", "ethrex-common/c-kzg"] sync-test = [] l2 = ["dep:ethrex-storage-rollup"] +test-utils = [] [lints.clippy] unwrap_used = "deny" diff --git a/crates/networking/p2p/peer_handler.rs b/crates/networking/p2p/peer_handler.rs index 7e80b6381f4..387d0cf5231 100644 --- a/crates/networking/p2p/peer_handler.rs +++ b/crates/networking/p2p/peer_handler.rs @@ -1,6 +1,8 @@ +#[cfg(any(test, feature = "test-utils"))] +use crate::discv4::peer_table::TARGET_PEERS; use crate::rlpx::initiator::RLPxInitiator; use crate::{ - discv4::peer_table::{PeerData, PeerTable, PeerTableError, TARGET_PEERS}, + discv4::peer_table::{PeerData, PeerTable, PeerTableError}, metrics::{CurrentStepValue, METRICS}, rlpx::{ connection::server::PeerConnection, @@ -152,6 +154,7 @@ impl PeerHandler { } } + #[cfg(any(test, feature = "test-utils"))] /// Creates a dummy PeerHandler for tests where interacting with peers is not needed /// This should only be used in tests as it won't be able to interact with the node's connected peers pub async fn dummy() -> PeerHandler { diff --git a/crates/networking/p2p/rlpx/initiator.rs b/crates/networking/p2p/rlpx/initiator.rs index 65f45fe11e0..7d80be110ad 100644 --- a/crates/networking/p2p/rlpx/initiator.rs +++ b/crates/networking/p2p/rlpx/initiator.rs @@ -1,7 +1,7 @@ use crate::types::Node; use crate::{ discv4::{ - peer_table::{PeerTable, PeerTableError}, + peer_table::PeerTableError, server::{INITIAL_LOOKUP_INTERVAL, LOOKUP_INTERVAL}, }, metrics::METRICS, @@ -15,6 +15,9 @@ use spawned_concurrency::{ use std::time::Duration; use tracing::{debug, error, info}; +#[cfg(any(test, feature = "test-utils"))] +use crate::discv4::peer_table::PeerTable; + #[derive(Debug, thiserror::Error)] pub enum RLPxInitiatorError { #[error(transparent)] @@ -76,6 +79,7 @@ impl 
RLPxInitiator { } } + #[cfg(any(test, feature = "test-utils"))] pub async fn dummy(peer_table: PeerTable) -> GenServerHandle { info!("Starting RLPx Initiator"); let state = RLPxInitiator::new(P2PContext::dummy(peer_table).await); diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index f767e47fe2c..fd1c25012ab 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -24,7 +24,9 @@ use ethrex_common::{ types::{AccountState, Block, BlockHash, BlockHeader}, }; use ethrex_rlp::{decode::RLPDecode, encode::RLPEncode, error::RLPDecodeError}; -use ethrex_storage::{EngineType, STATE_TRIE_SEGMENTS, Store, error::StoreError}; +#[cfg(any(test, feature = "test-utils"))] +use ethrex_storage::EngineType; +use ethrex_storage::{STATE_TRIE_SEGMENTS, Store, error::StoreError}; use ethrex_trie::trie_sorted::TrieGenerationError; use ethrex_trie::{Trie, TrieError}; use rayon::iter::{ParallelBridge, ParallelIterator}; @@ -120,6 +122,7 @@ impl Syncer { } } + #[cfg(any(test, feature = "test-utils"))] /// Creates a dummy Syncer for tests where syncing is not needed /// This should only be used in tests as it won't be able to connect to the p2p network pub async fn dummy() -> Self { diff --git a/crates/networking/p2p/sync_manager.rs b/crates/networking/p2p/sync_manager.rs index b46f397c9ff..45aa4e1c685 100644 --- a/crates/networking/p2p/sync_manager.rs +++ b/crates/networking/p2p/sync_manager.rs @@ -67,6 +67,7 @@ impl SyncManager { sync_manager } + #[cfg(any(test, feature = "test-utils"))] /// Creates a dummy SyncManager for tests where syncing is not needed /// This should only be used in tests as it won't be able to connect to the p2p network pub async fn dummy() -> Self { diff --git a/crates/networking/rpc/Cargo.toml b/crates/networking/rpc/Cargo.toml index 41643c9c791..ad9729aa08a 100644 --- a/crates/networking/rpc/Cargo.toml +++ b/crates/networking/rpc/Cargo.toml @@ -20,7 +20,7 @@ ethrex-common.workspace = true ethrex-storage.workspace = 
true ethrex-vm.workspace = true ethrex-blockchain.workspace = true -ethrex-p2p.workspace = true +ethrex-p2p = {workspace = true, features = ["test-utils"]} ethrex-rlp.workspace = true ethrex-trie.workspace = true ethrex-storage-rollup = { workspace = true, optional = true } From 9ee1a184c91a1b277ceeb7efa1cdc7ee6f4e51e5 Mon Sep 17 00:00:00 2001 From: Camila Di Ielsi Date: Tue, 4 Nov 2025 11:03:00 -0300 Subject: [PATCH 53/55] include copilot's comments --- cmd/ethrex/initializers.rs | 2 +- crates/networking/p2p/network.rs | 4 ++++ crates/networking/p2p/rlpx/initiator.rs | 2 ++ crates/networking/rpc/admin/peers.rs | 8 +++----- 4 files changed, 10 insertions(+), 6 deletions(-) diff --git a/cmd/ethrex/initializers.rs b/cmd/ethrex/initializers.rs index f5028d824f4..4d48cd52d71 100644 --- a/cmd/ethrex/initializers.rs +++ b/cmd/ethrex/initializers.rs @@ -441,7 +441,7 @@ pub async fn init_l1( let peer_handler = PeerHandler::new(peer_table.clone(), initiator); init_rpc_api( - &opts.clone(), + &opts, peer_handler.clone(), local_p2p_node, local_node_record.clone(), diff --git a/crates/networking/p2p/network.rs b/crates/networking/p2p/network.rs index d38c70f4b6a..ce6951e3db0 100644 --- a/crates/networking/p2p/network.rs +++ b/crates/networking/p2p/network.rs @@ -93,6 +93,10 @@ impl P2PContext { tx_broadcaster, }) } + + #[cfg(any(test, feature = "test-utils"))] + /// Creates a dummy P2PContext for tests + /// This should only be used in tests as it won't be able to connect to the p2p network pub async fn dummy(peer_table: PeerTable) -> P2PContext { use ethrex_storage::EngineType; diff --git a/crates/networking/p2p/rlpx/initiator.rs b/crates/networking/p2p/rlpx/initiator.rs index 7d80be110ad..4ca7b1cc502 100644 --- a/crates/networking/p2p/rlpx/initiator.rs +++ b/crates/networking/p2p/rlpx/initiator.rs @@ -80,6 +80,8 @@ impl RLPxInitiator { } #[cfg(any(test, feature = "test-utils"))] + /// Creates a dummy GenServer for tests + /// This should only be used in tests pub async fn 
dummy(peer_table: PeerTable) -> GenServerHandle { info!("Starting RLPx Initiator"); let state = RLPxInitiator::new(P2PContext::dummy(peer_table).await); diff --git a/crates/networking/rpc/admin/peers.rs b/crates/networking/rpc/admin/peers.rs index 0dd4fed1146..8109f575791 100644 --- a/crates/networking/rpc/admin/peers.rs +++ b/crates/networking/rpc/admin/peers.rs @@ -129,7 +129,7 @@ pub async fn add_peer(context: &mut RpcApiContext, request: &RpcRequest) -> Resu .cast(InMessage::Initiate { node: node.clone() }) .await; - if peer_is_conected(&mut context.peer_handler, &node.enode_url()).await { + if peer_is_connected(&mut context.peer_handler, &node.enode_url()).await { return Ok(serde_json::to_value(true)?); } @@ -140,14 +140,12 @@ pub async fn add_peer(context: &mut RpcApiContext, request: &RpcRequest) -> Resu } } -async fn peer_is_conected(peer_handler: &mut PeerHandler, enode_url: &String) -> bool { +async fn peer_is_connected(peer_handler: &mut PeerHandler, enode_url: &str) -> bool { peer_handler .read_connected_peers() .await .into_iter() - .map(|peer| peer.node.enode_url()) - .collect::>() - .contains(enode_url) + .any(|peer| peer.node.enode_url() == *enode_url) } // TODO: Adapt the test to the new P2P architecture. 
From 45a65cf3a5d76b5c9c8a3dd5794f53ee97bd8bb4 Mon Sep 17 00:00:00 2001 From: cdiielsi <49721261+cdiielsi@users.noreply.github.com> Date: Tue, 4 Nov 2025 12:22:31 -0300 Subject: [PATCH 54/55] restore og ci parallelism for running hive tests --- .github/workflows/pr-main_l1.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pr-main_l1.yaml b/.github/workflows/pr-main_l1.yaml index e86bab776cf..19abf558d84 100644 --- a/.github/workflows/pr-main_l1.yaml +++ b/.github/workflows/pr-main_l1.yaml @@ -241,7 +241,7 @@ jobs: SIM_LIMIT: ${{ matrix.limit }} SIM_BUILDARG: ${{ matrix.buildarg }} run: | - FLAGS='--sim.parallelism 8 --sim.loglevel 3' + FLAGS='--sim.parallelism 4 --sim.loglevel 3' if [[ -n "$SIM_LIMIT" ]]; then escaped_limit=${SIM_LIMIT//\'/\'\\\'\'} FLAGS+=" --sim.limit '$escaped_limit'" From d17f68aa79e114f560e05c1016927dfc0ec4efa7 Mon Sep 17 00:00:00 2001 From: Camila Di Ielsi Date: Tue, 4 Nov 2025 16:12:21 -0300 Subject: [PATCH 55/55] add corrections mentioned in review --- cmd/ethrex/l2/initializers.rs | 1 - crates/networking/p2p/rlpx/initiator.rs | 2 +- crates/networking/rpc/admin/peers.rs | 11 +++++------ 3 files changed, 6 insertions(+), 8 deletions(-) diff --git a/cmd/ethrex/l2/initializers.rs b/cmd/ethrex/l2/initializers.rs index a97aa43cbc1..77e5e9b1211 100644 --- a/cmd/ethrex/l2/initializers.rs +++ b/cmd/ethrex/l2/initializers.rs @@ -13,7 +13,6 @@ use ethrex_common::types::fee_config::{FeeConfig, L1FeeConfig, OperatorFeeConfig use ethrex_common::{Address, types::DEFAULT_BUILDER_GAS_CEIL}; use ethrex_l2::SequencerConfig; use ethrex_l2::sequencer::l1_committer::regenerate_head_state; - use ethrex_p2p::{ discv4::peer_table::PeerTable, network::P2PContext, diff --git a/crates/networking/p2p/rlpx/initiator.rs b/crates/networking/p2p/rlpx/initiator.rs index 4ca7b1cc502..9ef6366e0c2 100644 --- a/crates/networking/p2p/rlpx/initiator.rs +++ b/crates/networking/p2p/rlpx/initiator.rs @@ -51,7 +51,7 @@ impl RLPxInitiator { pub 
async fn spawn(context: P2PContext) -> GenServerHandle { info!("Starting RLPx Initiator"); let state = RLPxInitiator::new(context); - let mut server = RLPxInitiator::start_on_thread(state.clone()); + let mut server = RLPxInitiator::start(state.clone()); let _ = server.cast(InMessage::LookForPeer).await; server } diff --git a/crates/networking/rpc/admin/peers.rs b/crates/networking/rpc/admin/peers.rs index 8109f575791..f125f69e621 100644 --- a/crates/networking/rpc/admin/peers.rs +++ b/crates/networking/rpc/admin/peers.rs @@ -122,13 +122,12 @@ pub async fn add_peer(context: &mut RpcApiContext, request: &RpcRequest) -> Resu let start = Instant::now(); let runtime = Duration::from_secs(10); + let cast_result = server + .cast(InMessage::Initiate { node: node.clone() }) + .await; // This loop is necessary because connections are asynchronous, so to check if the connection with the peer was actually // established we need to wait. loop { - let cast_result = server - .cast(InMessage::Initiate { node: node.clone() }) - .await; - if peer_is_connected(&mut context.peer_handler, &node.enode_url()).await { return Ok(serde_json::to_value(true)?); } @@ -136,7 +135,7 @@ pub async fn add_peer(context: &mut RpcApiContext, request: &RpcRequest) -> Resu if cast_result.is_err() || start.elapsed() >= runtime { return Ok(serde_json::to_value(false)?); } - std::thread::sleep(Duration::from_millis(100)); + let _ = tokio::time::sleep(Duration::from_millis(100)).await; } } @@ -144,7 +143,7 @@ async fn peer_is_connected(peer_handler: &mut PeerHandler, enode_url: &str) -> b peer_handler .read_connected_peers() .await - .into_iter() + .iter() .any(|peer| peer.node.enode_url() == *enode_url) }