diff --git a/Cargo.lock b/Cargo.lock index 141b9b2ac18..638eab7df8f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -932,7 +932,7 @@ dependencies = [ "object 0.37.3", "rustc-demangle", "serde", - "windows-link 0.2.0", + "windows-link 0.2.1", ] [[package]] @@ -1532,7 +1532,7 @@ dependencies = [ "num-traits", "serde", "wasm-bindgen", - "windows-link 0.2.0", + "windows-link 0.2.1", ] [[package]] @@ -2272,7 +2272,7 @@ checksum = "881c5d0a13b2f1498e2306e82cbada78390e152d4b1378fb28a84f4dcd0dc4f3" dependencies = [ "dispatch", "nix", - "windows-sys 0.61.1", + "windows-sys 0.61.2", ] [[package]] @@ -2766,7 +2766,7 @@ dependencies = [ "libc", "option-ext", "redox_users 0.5.2", - "windows-sys 0.61.1", + "windows-sys 0.61.2", ] [[package]] @@ -3147,7 +3147,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.61.1", + "windows-sys 0.61.2", ] [[package]] @@ -4824,12 +4824,13 @@ dependencies = [ [[package]] name = "half" -version = "2.6.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "459196ed295495a68f7d7fe1d84f6c4b7ff0e21fe3017b2f283c6fac3ad803c9" +checksum = "e54c115d4f30f52c67202f079c5f9d8b49db4691f460fdb0b4c2e838261b2ba5" dependencies = [ "cfg-if 1.0.3", "crunchy", + "zerocopy 0.8.27", ] [[package]] @@ -5241,7 +5242,7 @@ dependencies = [ "tokio", "tokio-rustls 0.26.4", "tower-service", - "webpki-roots 1.0.2", + "webpki-roots 1.0.3", ] [[package]] @@ -5323,7 +5324,7 @@ dependencies = [ "js-sys", "log", "wasm-bindgen", - "windows-core 0.62.1", + "windows-core 0.62.2", ] [[package]] @@ -6032,9 +6033,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.176" +version = "0.2.177" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58f929b4d672ea937a23a1ab494143d968337a5f47e56d0815df1e0890ddf174" +checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976" [[package]] name = "libgit2-sys" @@ -6055,7 +6056,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d7c4b02199fee7c5d21a5ae7d8cfa79a6ef5bb2fc834d6e9058e89c825efdc55" dependencies = [ "cfg-if 1.0.3", - "windows-link 0.2.0", + "windows-link 0.2.1", ] [[package]] @@ -7552,7 +7553,7 @@ dependencies = [ "libc", "redox_syscall 0.5.18", "smallvec", - "windows-link 0.2.0", + "windows-link 0.2.1", ] [[package]] @@ -8053,7 +8054,7 @@ version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" dependencies = [ - "toml_edit 0.23.6", + "toml_edit 0.23.7", ] [[package]] @@ -8738,7 +8739,7 @@ dependencies = [ "wasm-bindgen-futures", "wasm-streams", "web-sys", - "webpki-roots 1.0.2", + "webpki-roots 1.0.3", ] [[package]] @@ -9455,7 +9456,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.11.0", - "windows-sys 0.61.1", + "windows-sys 0.61.2", ] [[package]] @@ -9702,7 +9703,7 @@ version = "0.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "891d81b926048e76efe18581bf793546b4c0eaf8448d72be8de2bbee5fd166e1" dependencies = [ - "windows-sys 0.61.1", + "windows-sys 0.61.2", ] [[package]] @@ -10284,9 +10285,9 @@ dependencies = [ [[package]] name = "sp1-core-executor" -version = "5.2.1" +version = "5.2.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7bc79ba7a23ee664870ac6dd9ca8125d9fd0bb1c6acb13cb34cb1c0b81458e89" +checksum = "117e991e137c9121eac26b2fd06daaa8f9e7c118a167d9a977e0048ac2142fac" dependencies = [ "bincode", "bytemuck", @@ -10327,9 +10328,9 @@ dependencies = [ [[package]] name = "sp1-core-machine" -version = "5.2.1" +version = "5.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a1cbc279cf9dcf1faabc8d9b592027cf5ce5bfea6d44d2da58351379f92dba1" +checksum = "7b70e76953d1c4d507136b373bc198df0271d011c9acaa9b1eb8614dd65a0e04" dependencies = [ "bincode", "cbindgen", @@ -10400,9 +10401,9 @@ dependencies = [ [[package]] name = "sp1-curves" -version = "5.2.1" +version = "5.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69234f4667ae1a00f7bfb90b42d6aa141744114b128ac262b9a28e9c869cf514" +checksum = "3e29cb79716167e58c0719d572e686880172f1816cd85e0acab74ea0ff3c795e" dependencies = [ "cfg-if 1.0.3", "dashu", @@ -10422,9 +10423,9 @@ dependencies = [ [[package]] name = "sp1-derive" -version = "5.2.1" +version = "5.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a736bce661752b1d6ecf33eca197443fb535124b3caabd332862d6f8258e3c8d" +checksum = "7ac59616976c008e862f99d26fd0c1c037d464df33d9ca548be88f938f0b1bcf" dependencies = [ "quote", "syn 1.0.109", @@ -10432,9 +10433,9 @@ dependencies = [ [[package]] name = "sp1-lib" -version = "5.2.1" +version = "5.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e1fe81b6f87134f9170cb642f948ae41e0ee1cd3785e0cb665add5b67106d1a" +checksum = "fce8ad0f153443d09d398eccb650a0b2dcbf829470e394e4bf60ec4379c7af93" dependencies = [ "bincode", "elliptic-curve", @@ -10444,9 +10445,9 @@ dependencies = [ [[package]] name = "sp1-primitives" -version = "5.2.1" +version = "5.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dddd8d022840c1c500e0d7f82e9b9cf080b7dabd469f06b394010e6a594f692b" +checksum = "0244dee3a7a0f88cf71c3edf518f4fc97794ae870a107cbe7c810ac3fbf879cb" dependencies = [ "bincode", "blake3", @@ -10544,9 +10545,9 @@ dependencies = [ [[package]] name = "sp1-recursion-compiler" -version = "5.2.1" +version = "5.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61aa201b49cbdd52be19faec75f648e7e5e2c4930bcea7f4d1f1dbb3882cc518" +checksum = "5611ead360e9875f426c5add60ce8082bfee28302a5c7dbfa39cad02e9178f88" dependencies = [ "backtrace", "itertools 0.13.0", @@ -10566,9 +10567,9 @@ dependencies = [ [[package]] name = "sp1-recursion-core" -version = "5.2.1" +version = "5.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e919d8031abe3b01ed001d5877801c2edcea0d98de56786a3e631a10fea3400d" +checksum = "79029408bee7a503394ab9d738432e709475976034348d5107a1d1a0b06dc287" dependencies = [ "backtrace", "cbindgen", @@ -10609,9 +10610,9 @@ dependencies = [ [[package]] name = "sp1-recursion-derive" -version = "5.2.1" +version = "5.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c8467ade873bf1e43d8e6386a7feaac6e9603c12771fb33c5b0c0a6f3c63bc" +checksum = "632f557f5bbfc8bc21b2bc319d3a375b46ea4522c00b875f02d73e4c0709b023" dependencies = [ "quote", "syn 1.0.109", @@ -10690,9 +10691,9 @@ dependencies = [ [[package]] name = "sp1-stark" -version = "5.2.1" 
+version = "5.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48b9b57606ab0eb9560f0456dc978166ab0a3bd9d8b3f2ab24ea5e1377c56f07" +checksum = "1f0cdde80366245a374d29fecdde2881286002a6e3f51b84f54b86560ed026e5" dependencies = [ "arrayref", "hashbrown 0.14.5", @@ -10718,7 +10719,6 @@ dependencies = [ "sp1-derive", "sp1-primitives", "strum 0.26.3", - "strum_macros 0.26.4", "sysinfo", "tracing", ] @@ -10823,9 +10823,9 @@ dependencies = [ [[package]] name = "stable_deref_trait" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" +checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596" [[package]] name = "static_assertions" @@ -11074,7 +11074,7 @@ dependencies = [ "getrandom 0.3.3", "once_cell", "rustix 1.1.2", - "windows-sys 0.61.1", + "windows-sys 0.61.2", ] [[package]] @@ -11501,9 +11501,9 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32f1085dec27c2b6632b04c80b3bb1b4300d6495d1e129693bdda7d91e72eec1" +checksum = "f2cdb639ebbc97961c51720f858597f7f24c4fc295327923af55b74c3c724533" dependencies = [ "serde_core", ] @@ -11535,21 +11535,21 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.23.6" +version = "0.23.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3effe7c0e86fdff4f69cdd2ccc1b96f933e24811c5441d44904e8683e27184b" +checksum = "6485ef6d0d9b5d0ec17244ff7eb05310113c3f316f2d14200d4de56b3cb98f8d" dependencies = [ "indexmap 2.11.4", - "toml_datetime 0.7.2", + "toml_datetime 0.7.3", "toml_parser", "winnow 0.7.13", ] [[package]] name = "toml_parser" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cf893c33be71572e0e9aa6dd15e6677937abd686b066eac3f8cd3531688a627" +checksum = "c0cbe268d35bdb4bb5a56a2de88d0ad0eb70af5384a99d648cd4b3d04039800e" dependencies = [ "winnow 0.7.13", ] @@ -12448,14 +12448,14 @@ version = "0.26.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9" dependencies = [ - "webpki-roots 1.0.2", + "webpki-roots 1.0.3", ] [[package]] name = "webpki-roots" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e8983c3ab33d6fb807cfcdad2491c4ea8cbc8ed839181c7dfd9c67c83e261b2" +checksum = "32b130c0d2d49f8b6889abc456e795e82525204f27c42cf767cf0d7734e089b8" dependencies = [ "rustls-pki-types", ] @@ -12504,7 +12504,7 @@ version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ - "windows-sys 0.61.1", + "windows-sys 0.61.2", ] [[package]] @@ -12534,22 +12534,22 @@ dependencies = [ [[package]] name = "windows-core" -version = "0.62.1" +version = "0.62.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6844ee5416b285084d3d3fffd743b925a6c9385455f64f6d4fa3031c4c2749a9" +checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" dependencies = [ "windows-implement", "windows-interface", - "windows-link 0.2.0", - 
"windows-result 0.4.0", - "windows-strings 0.5.0", + "windows-link 0.2.1", + "windows-result 0.4.1", + "windows-strings 0.5.1", ] [[package]] name = "windows-implement" -version = "0.60.1" +version = "0.60.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edb307e42a74fb6de9bf3a02d9712678b22399c87e6fa869d6dfcd8c1b7754e0" +checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" dependencies = [ "proc-macro2", "quote", @@ -12558,9 +12558,9 @@ dependencies = [ [[package]] name = "windows-interface" -version = "0.59.2" +version = "0.59.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0abd1ddbc6964ac14db11c7213d6532ef34bd9aa042c2e5935f59d7908b46a5" +checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" dependencies = [ "proc-macro2", "quote", @@ -12575,9 +12575,9 @@ checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" [[package]] name = "windows-link" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45e46c0661abb7180e7b9c281db115305d49ca1709ab8242adf09666d2173c65" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" [[package]] name = "windows-registry" @@ -12601,11 +12601,11 @@ dependencies = [ [[package]] name = "windows-result" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7084dcc306f89883455a206237404d3eaf961e5bd7e0f312f7c91f57eb44167f" +checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" dependencies = [ - "windows-link 0.2.0", + "windows-link 0.2.1", ] [[package]] @@ -12619,11 +12619,11 @@ dependencies = [ [[package]] name = "windows-strings" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7218c655a553b0bed4426cf54b20d7ba363ef543b52d515b3e48d7fd55318dda" +checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" dependencies = [ - "windows-link 0.2.0", + "windows-link 0.2.1", ] [[package]] @@ -12659,16 +12659,16 @@ version = "0.60.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" dependencies = [ - "windows-targets 0.53.4", + "windows-targets 0.53.5", ] [[package]] name = "windows-sys" -version = "0.61.1" +version = "0.61.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f109e41dd4a3c848907eb83d5a42ea98b3769495597450cf6d153507b166f0f" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" dependencies = [ - "windows-link 0.2.0", + "windows-link 0.2.1", ] [[package]] @@ -12704,19 +12704,19 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.53.4" +version = "0.53.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d42b7b7f66d2a06854650af09cfdf8713e427a439c97ad65a6375318033ac4b" +checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" dependencies = [ - "windows-link 0.2.0", - "windows_aarch64_gnullvm 0.53.0", - "windows_aarch64_msvc 0.53.0", - "windows_i686_gnu 0.53.0", - "windows_i686_gnullvm 0.53.0", - "windows_i686_msvc 0.53.0", - "windows_x86_64_gnu 0.53.0", - "windows_x86_64_gnullvm 0.53.0", - "windows_x86_64_msvc 0.53.0", + 
"windows-link 0.2.1", + "windows_aarch64_gnullvm 0.53.1", + "windows_aarch64_msvc 0.53.1", + "windows_i686_gnu 0.53.1", + "windows_i686_gnullvm 0.53.1", + "windows_i686_msvc 0.53.1", + "windows_x86_64_gnu 0.53.1", + "windows_x86_64_gnullvm 0.53.1", + "windows_x86_64_msvc 0.53.1", ] [[package]] @@ -12733,9 +12733,9 @@ checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_gnullvm" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" +checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" [[package]] name = "windows_aarch64_msvc" @@ -12751,9 +12751,9 @@ checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_aarch64_msvc" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" +checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" [[package]] name = "windows_i686_gnu" @@ -12769,9 +12769,9 @@ checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" [[package]] name = "windows_i686_gnu" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" +checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3" [[package]] name = "windows_i686_gnullvm" @@ -12781,9 +12781,9 @@ checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_gnullvm" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" +checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" [[package]] name = "windows_i686_msvc" @@ -12799,9 +12799,9 @@ checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_i686_msvc" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" +checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" [[package]] name = "windows_x86_64_gnu" @@ -12817,9 +12817,9 @@ checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnu" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" +checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" [[package]] name = "windows_x86_64_gnullvm" @@ -12835,9 +12835,9 @@ checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_gnullvm" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" +checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" [[package]] name = "windows_x86_64_msvc" @@ 
-12853,9 +12853,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "windows_x86_64_msvc" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" +checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" [[package]] name = "winnow" diff --git a/Cargo.toml b/Cargo.toml index 9fa8dd76c34..0732a2df572 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -113,8 +113,8 @@ url = { version = "2.5.4", features = ["serde"] } kzg-rs = "0.2.6" libsql = "0.9.10" futures = "0.3.31" -spawned-concurrency = "0.4.0" -spawned-rt = "0.4.0" +spawned-concurrency = "0.4.1" +spawned-rt = "0.4.1" lambdaworks-crypto = "0.11.0" tui-logger = { version = "0.17.3", features = ["tracing-support"] } crossbeam = "0.8.4" diff --git a/cmd/ethrex/ethrex.rs b/cmd/ethrex/ethrex.rs index 8f80135d37d..a402852b843 100644 --- a/cmd/ethrex/ethrex.rs +++ b/cmd/ethrex/ethrex.rs @@ -4,7 +4,7 @@ use ethrex::{ initializers::{init_l1, init_tracing}, utils::{NodeConfigFile, get_client_version, store_node_config_file}, }; -use ethrex_p2p::{discv4::peer_table::PeerTableHandle, types::NodeRecord}; +use ethrex_p2p::{discv4::peer_table::PeerTable, types::NodeRecord}; use std::{path::Path, sync::Arc, time::Duration}; use tokio::{ signal::unix::{SignalKind, signal}, @@ -35,7 +35,7 @@ pub static malloc_conf: &[u8] = b"prof:true,prof_active:true,lg_prof_sample:19\0 async fn server_shutdown( datadir: &Path, cancel_token: &CancellationToken, - peer_table: PeerTableHandle, + peer_table: PeerTable, local_node_record: Arc>, ) { info!("Server shut down started..."); diff --git a/cmd/ethrex/initializers.rs b/cmd/ethrex/initializers.rs index 2ae5602521e..77c264b2ccf 100644 --- a/cmd/ethrex/initializers.rs +++ b/cmd/ethrex/initializers.rs @@ -12,7 +12,7 @@ use ethrex_config::networks::Network; use ethrex_metrics::profiling::{FunctionProfilingLayer, initialize_block_processing_profile}; use ethrex_p2p::{ - discv4::peer_table::{PeerTable, PeerTableHandle}, + discv4::peer_table::PeerTable, network::P2PContext, peer_handler::PeerHandler, rlpx::l2::l2_connection::P2PBasedContext, @@ -390,7 +390,7 @@ pub async fn init_l1( ) -> eyre::Result<( PathBuf, CancellationToken, - PeerTableHandle, + PeerTable, Arc>, )> { let datadir = &opts.datadir; diff --git a/cmd/ethrex/l2/initializers.rs b/cmd/ethrex/l2/initializers.rs index b865c752894..9e39147b141 100644 --- a/cmd/ethrex/l2/initializers.rs +++ b/cmd/ethrex/l2/initializers.rs @@ -11,7 +11,7 @@ use ethrex_blockchain::{Blockchain, BlockchainType}; use ethrex_common::{Address, types::DEFAULT_BUILDER_GAS_CEIL}; use ethrex_l2::SequencerConfig; use ethrex_p2p::{ - discv4::peer_table::{PeerTable, PeerTableHandle}, + discv4::peer_table::PeerTable, peer_handler::PeerHandler, rlpx::l2::l2_connection::P2PBasedContext, sync_manager::SyncManager, @@ -31,7 +31,7 @@ use tui_logger::{LevelFilter, TuiTracingSubscriberLayer}; async fn init_rpc_api( opts: &L1Options, l2_opts: &L2Options, - peer_table: PeerTableHandle, + peer_table: PeerTable, local_p2p_node: Node, local_node_record: NodeRecord, store: Store, diff --git a/cmd/ethrex/utils.rs b/cmd/ethrex/utils.rs index f17df3756f2..e4eecc0c502 100644 --- a/cmd/ethrex/utils.rs +++ b/cmd/ethrex/utils.rs @@ -3,7 +3,7 @@ use bytes::Bytes; use directories::ProjectDirs; use ethrex_common::types::{Block, Genesis}; use ethrex_p2p::{ - discv4::peer_table::PeerTableHandle, + 
discv4::peer_table::PeerTable, sync::SyncMode, types::{Node, NodeRecord}, }; @@ -26,7 +26,7 @@ pub struct NodeConfigFile { } impl NodeConfigFile { - pub async fn new(mut peer_table: PeerTableHandle, node_record: NodeRecord) -> Self { + pub async fn new(mut peer_table: PeerTable, node_record: NodeRecord) -> Self { let connected_peers = peer_table.get_connected_nodes().await.unwrap_or(Vec::new()); NodeConfigFile { diff --git a/crates/l2/tee/quote-gen/Cargo.lock b/crates/l2/tee/quote-gen/Cargo.lock index 6d5e9f4a051..508fb285b1e 100644 --- a/crates/l2/tee/quote-gen/Cargo.lock +++ b/crates/l2/tee/quote-gen/Cargo.lock @@ -2071,7 +2071,6 @@ name = "ethrex-blockchain" version = "0.1.0" dependencies = [ "bytes", - "cfg-if 1.0.3", "ethrex-common", "ethrex-metrics", "ethrex-rlp", @@ -2090,14 +2089,15 @@ name = "ethrex-common" version = "0.1.0" dependencies = [ "bytes", - "c-kzg", "crc32fast", "ethereum-types 0.15.1", + "ethrex-crypto", "ethrex-rlp", "ethrex-trie", "hex", "kzg-rs", "lazy_static", + "libc", "once_cell", "rayon", "rkyv", @@ -2126,6 +2126,11 @@ dependencies = [ [[package]] name = "ethrex-crypto" version = "0.1.0" +dependencies = [ + "c-kzg", + "kzg-rs", + "thiserror 2.0.16", +] [[package]] name = "ethrex-dev" @@ -2154,7 +2159,6 @@ dependencies = [ "axum", "bincode", "bytes", - "cfg-if 1.0.3", "chrono", "clap", "color-eyre", @@ -2352,7 +2356,6 @@ dependencies = [ "axum", "axum-extra", "bytes", - "cfg-if 1.0.3", "envy", "ethereum-types 0.15.1", "ethrex-blockchain", @@ -2485,7 +2488,6 @@ version = "0.1.0" dependencies = [ "bincode", "bytes", - "cfg-if 1.0.3", "derive_more 1.0.0", "dyn-clone", "ethereum-types 0.15.1", @@ -2864,6 +2866,7 @@ dependencies = [ "bytes", "ethrex-blockchain", "ethrex-common", + "ethrex-crypto", "ethrex-l2-common", "ethrex-rlp", "ethrex-storage", @@ -5840,11 +5843,12 @@ dependencies = [ [[package]] name = "spawned-concurrency" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92e46fce4cdece99f2f07c125e902629ee25e4487d80cae6e3d6e891d5906b00" +checksum = "aa00b753ef7c942c13ee953f13609746a41c0fb8cf221849bbf3f654811a6669" dependencies = [ "futures", + "pin-project-lite", "spawned-rt", "thiserror 2.0.16", "tracing", @@ -5852,9 +5856,9 @@ dependencies = [ [[package]] name = "spawned-rt" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76347472cc448d47dbf9f67541fde19dbb054793e8e0546ce8917bfb695e1b56" +checksum = "42396ff1bc8bfdcad31f099a3af74b4830fb7bdc09a70d843dcfa8bab74ecea4" dependencies = [ "crossbeam 0.7.3", "tokio", diff --git a/crates/networking/p2p/discv4/peer_table.rs b/crates/networking/p2p/discv4/peer_table.rs index 185224ce51d..51d85eb7bf3 100644 --- a/crates/networking/p2p/discv4/peer_table.rs +++ b/crates/networking/p2p/discv4/peer_table.rs @@ -1,7 +1,7 @@ use crate::{ discv4::server::MAX_NODES_IN_NEIGHBORS_PACKET, metrics::METRICS, - rlpx::{self, connection::server::RLPxConnection, p2p::Capability}, + rlpx::{connection::server::PeerConnection, p2p::Capability}, types::{Node, NodeRecord}, }; use ethrex_common::{H256, U256}; @@ -10,15 +10,12 @@ use spawned_concurrency::{ error::GenServerError, tasks::{CallResponse, CastResponse, GenServer, GenServerHandle}, }; -use spawned_rt::tasks::mpsc; use std::{ collections::{BTreeMap, HashSet, btree_map::Entry}, net::IpAddr, - sync::Arc, time::{Duration, Instant}, }; use thiserror::Error; -use tokio::sync::Mutex; use tracing::{debug, info}; const 
MAX_SCORE: i64 = 50; @@ -27,6 +24,12 @@ const MIN_SCORE: i64 = -50; const MIN_SCORE_CRITICAL: i64 = MIN_SCORE * 3; /// Maximum amount of FindNode messages sent to a single node. const MAX_FIND_NODE_PER_PEER: u64 = 20; +/// Score weight for the load balancing function. +const SCORE_WEIGHT: i64 = 1; +/// Weight for amount of requests being handled by the peer for the load balancing function. +const REQUESTS_WEIGHT: i64 = 1; +/// Max amount of ongoing requests per peer. +const MAX_CONCURRENT_REQUESTS_PER_PEER: i64 = 100; #[derive(Debug, Clone)] pub struct Contact { @@ -85,19 +88,18 @@ pub struct PeerData { /// It is only valid as long as is_connected is true pub is_connection_inbound: bool, /// communication channels between the peer data and its active connection - pub channels: Option, - /// This tracks if a peer is being used by a task - /// So we can't use it yet - in_use: bool, + pub connection: Option, /// This tracks the score of a peer score: i64, + /// Track the amount of concurrent requests this peer is handling + requests: i64, } impl PeerData { pub fn new( node: Node, record: Option, - channels: Option, + connection: Option, capabilities: Vec, ) -> Self { Self { @@ -105,47 +107,32 @@ impl PeerData { record, supported_capabilities: capabilities, is_connection_inbound: false, - channels, - in_use: false, + connection, score: Default::default(), + requests: Default::default(), } } } -#[derive(Debug, Clone)] -/// Holds the respective sender and receiver ends of the communication channels between the peer data and its active connection -pub struct PeerChannels { - pub connection: GenServerHandle, - pub receiver: Arc>>, +#[derive(Clone, Debug)] +pub struct PeerTable { + handle: GenServerHandle, } -impl PeerChannels { - /// Sets up the communication channels for the peer - /// Returns the channel endpoints to send to the active connection's listen loop - pub(crate) fn create( - connection: GenServerHandle, - ) -> (Self, mpsc::Sender) { - let (connection_sender, receiver) = mpsc::channel::(); - ( - Self { - connection, - receiver: Arc::new(Mutex::new(receiver)), - }, - connection_sender, - ) +impl PeerTable { + pub fn spawn() -> PeerTable { + PeerTable { + handle: PeerTableServer::default().start(), + } } -} -#[derive(Clone, Debug)] -pub struct PeerTableHandle(GenServerHandle); -impl PeerTableHandle { /// We received a list of Nodes to contact. No conection has been established yet. pub async fn new_contacts( &mut self, nodes: Vec, local_node_id: H256, ) -> Result<(), PeerTableError> { - self.0 + self.handle .cast(CastMessage::NewContacts { nodes, local_node_id, @@ -158,13 +145,13 @@ impl PeerTableHandle { pub async fn new_connected_peer( &mut self, node: Node, - channels: PeerChannels, + connection: PeerConnection, capabilities: Vec, ) -> Result<(), PeerTableError> { - self.0 + self.handle .cast(CastMessage::NewConnectedPeer { node, - channels, + connection, capabilities, }) .await?; @@ -173,37 +160,39 @@ impl PeerTableHandle { /// Remove from list of connected peers. 
pub async fn remove_peer(&mut self, node_id: H256) -> Result<(), PeerTableError> { - self.0.cast(CastMessage::RemovePeer { node_id }).await?; + self.handle + .cast(CastMessage::RemovePeer { node_id }) + .await?; Ok(()) } - /// Mark node as not wanted - pub async fn set_unwanted(&mut self, node_id: &H256) -> Result<(), PeerTableError> { - self.0 - .cast(CastMessage::SetUnwanted { node_id: *node_id }) + /// Increment the number of ongoing requests for this peer + pub async fn inc_requests(&mut self, node_id: H256) -> Result<(), PeerTableError> { + self.handle + .cast(CastMessage::IncRequests { node_id }) .await?; Ok(()) } - /// Mark peer as in use - pub async fn mark_in_use(&mut self, node_id: &H256) -> Result<(), PeerTableError> { - self.0 - .cast(CastMessage::MarkInUse { node_id: *node_id }) + /// Decrement the number of ongoing requests for this peer + pub async fn dec_requests(&mut self, node_id: H256) -> Result<(), PeerTableError> { + self.handle + .cast(CastMessage::DecRequests { node_id }) .await?; Ok(()) } - /// Remove "in use" mark for peer - pub async fn free_peer(&mut self, node_id: &H256) -> Result<(), PeerTableError> { - self.0 - .cast(CastMessage::FreePeer { node_id: *node_id }) + /// Mark node as not wanted + pub async fn set_unwanted(&mut self, node_id: &H256) -> Result<(), PeerTableError> { + self.handle + .cast(CastMessage::SetUnwanted { node_id: *node_id }) .await?; Ok(()) } /// Record a successful connection, used to score peers pub async fn record_success(&mut self, node_id: &H256) -> Result<(), PeerTableError> { - self.0 + self.handle .cast(CastMessage::RecordSuccess { node_id: *node_id }) .await?; Ok(()) @@ -211,23 +200,15 @@ impl PeerTableHandle { /// Record a failed connection, used to score peers pub async fn record_failure(&mut self, node_id: &H256) -> Result<(), PeerTableError> { - self.0 + self.handle .cast(CastMessage::RecordFailure { node_id: *node_id }) .await?; Ok(()) } - /// Remove "in use" mark for peer, and record a failed connection. 
- pub async fn free_with_failure(&mut self, node_id: &H256) -> Result<(), PeerTableError> { - self.0 - .cast(CastMessage::FreeWithFailure { node_id: *node_id }) - .await?; - Ok(()) - } - /// Record a critical failure for connection, used to score peers pub async fn record_critical_failure(&mut self, node_id: &H256) -> Result<(), PeerTableError> { - self.0 + self.handle .cast(CastMessage::RecordCriticalFailure { node_id: *node_id }) .await?; Ok(()) @@ -239,7 +220,7 @@ impl PeerTableHandle { node_id: &H256, hash: H256, ) -> Result<(), PeerTableError> { - self.0 + self.handle .cast(CastMessage::RecordPingSent { node_id: *node_id, hash, @@ -254,7 +235,7 @@ impl PeerTableHandle { node_id: &H256, ping_hash: H256, ) -> Result<(), PeerTableError> { - self.0 + self.handle .cast(CastMessage::RecordPongReceived { node_id: *node_id, ping_hash, @@ -265,7 +246,7 @@ impl PeerTableHandle { /// Set peer as disposable pub async fn set_disposable(&mut self, node_id: &H256) -> Result<(), PeerTableError> { - self.0 + self.handle .cast(CastMessage::SetDisposable { node_id: *node_id }) .await?; Ok(()) @@ -273,7 +254,7 @@ impl PeerTableHandle { /// Increment FindNode message counter for peer pub async fn increment_find_node_sent(&mut self, node_id: &H256) -> Result<(), PeerTableError> { - self.0 + self.handle .cast(CastMessage::IncrementFindNodeSent { node_id: *node_id }) .await?; Ok(()) @@ -281,7 +262,7 @@ impl PeerTableHandle { /// Set flag for peer that tells that it knows us pub async fn knows_us(&mut self, node_id: &H256) -> Result<(), PeerTableError> { - self.0 + self.handle .cast(CastMessage::KnowsUs { node_id: *node_id }) .await?; Ok(()) @@ -289,13 +270,13 @@ impl PeerTableHandle { /// Remove from list of contacts the ones marked as disposable pub async fn prune(&mut self) -> Result<(), PeerTableError> { - self.0.cast(CastMessage::Prune).await?; + self.handle.cast(CastMessage::Prune).await?; Ok(()) } /// Return the amount of connected peers pub async fn peer_count(&mut self) -> Result { - match self.0.call(CallMessage::PeerCount).await? { + match self.handle.call(CallMessage::PeerCount).await? { OutMessage::PeerCount(peer_count) => Ok(peer_count), _ => unreachable!(), } @@ -307,7 +288,7 @@ impl PeerTableHandle { capabilities: &[Capability], ) -> Result { match self - .0 + .handle .call(CallMessage::PeerCountByCapabilities { capabilities: capabilities.to_vec(), }) @@ -318,14 +299,6 @@ impl PeerTableHandle { } } - /// Remove the "in use" mark for all peers - pub async fn free_peers(&mut self) -> Result { - match self.0.call(CallMessage::FreePeers).await? { - OutMessage::PeerCount(result) => Ok(result), - _ => unreachable!(), - } - } - /// Check if target number of contacts and connected peers is reached pub async fn target_reached( &mut self, @@ -333,7 +306,7 @@ impl PeerTableHandle { target_peers: usize, ) -> Result { match self - .0 + .handle .call(CallMessage::TargetReached { target_contacts, target_peers, @@ -351,7 +324,7 @@ impl PeerTableHandle { amount: usize, ) -> Result, PeerTableError> { match self - .0 + .handle .call(CallMessage::GetContactsToInitiate(amount)) .await? { @@ -362,7 +335,7 @@ impl PeerTableHandle { /// Get all contacts available for lookup pub async fn get_contacts_for_lookup(&mut self) -> Result, PeerTableError> { - match self.0.call(CallMessage::GetContactsForLookup).await? { + match self.handle.call(CallMessage::GetContactsForLookup).await? 
{ OutMessage::Contacts(contacts) => Ok(contacts), _ => unreachable!(), } @@ -374,7 +347,7 @@ impl PeerTableHandle { revalidation_interval: Duration, ) -> Result, PeerTableError> { match self - .0 + .handle .call(CallMessage::GetContactsToRevalidate(revalidation_interval)) .await? { @@ -387,9 +360,9 @@ impl PeerTableHandle { pub async fn get_best_peer( &mut self, capabilities: &[Capability], - ) -> Result, PeerTableError> { + ) -> Result, PeerTableError> { match self - .0 + .handle .call(CallMessage::GetBestPeer { capabilities: capabilities.to_vec(), }) @@ -397,29 +370,8 @@ impl PeerTableHandle { { OutMessage::FoundPeer { node_id, - peer_channels, - } => Ok(Some((node_id, peer_channels))), - OutMessage::NotFound => Ok(None), - _ => unreachable!(), - } - } - - /// Returns the peer with the highest score and its peer channel, and marks it as used, if found. - pub async fn use_best_peer( - &mut self, - capabilities: &[Capability], - ) -> Result, PeerTableError> { - match self - .0 - .call(CallMessage::UseBestPeer { - capabilities: capabilities.to_vec(), - }) - .await? - { - OutMessage::FoundPeer { - node_id, - peer_channels, - } => Ok(Some((node_id, peer_channels))), + connection, + } => Ok(Some((node_id, connection))), OutMessage::NotFound => Ok(None), _ => unreachable!(), } @@ -428,7 +380,7 @@ impl PeerTableHandle { /// Get peer score pub async fn get_score(&mut self, node_id: &H256) -> Result { match self - .0 + .handle .call(CallMessage::GetScore { node_id: *node_id }) .await? { @@ -439,7 +391,7 @@ impl PeerTableHandle { /// Get list of connected peers pub async fn get_connected_nodes(&mut self) -> Result, PeerTableError> { - if let OutMessage::Nodes(nodes) = self.0.call(CallMessage::GetConnectedNodes).await? { + if let OutMessage::Nodes(nodes) = self.handle.call(CallMessage::GetConnectedNodes).await? { Ok(nodes) } else { unreachable!() @@ -449,8 +401,12 @@ impl PeerTableHandle { /// Get list of connected peers with their capabilities pub async fn get_peers_with_capabilities( &mut self, - ) -> Result)>, PeerTableError> { - match self.0.call(CallMessage::GetPeersWithCapabilities).await? { + ) -> Result)>, PeerTableError> { + match self + .handle + .call(CallMessage::GetPeersWithCapabilities) + .await? + { OutMessage::PeersWithCapabilities(peers_with_capabilities) => { Ok(peers_with_capabilities) } @@ -459,18 +415,18 @@ impl PeerTableHandle { } /// Get peer channels for communication - pub async fn get_peer_channels( + pub async fn get_peer_connections( &mut self, capabilities: &[Capability], - ) -> Result, PeerTableError> { + ) -> Result, PeerTableError> { match self - .0 - .call(CallMessage::GetPeerChannels { + .handle + .call(CallMessage::GetPeerConnections { capabilities: capabilities.to_vec(), }) .await? { - OutMessage::PeerChannels(peer_channels) => Ok(peer_channels), + OutMessage::PeerConnection(connection) => Ok(connection), _ => unreachable!(), } } @@ -478,7 +434,7 @@ impl PeerTableHandle { /// Insert new peer if it is new. Returns a boolean telling if it was new or not. pub async fn insert_if_new(&mut self, node: &Node) -> Result { match self - .0 + .handle .call(CallMessage::InsertIfNew { node: node.clone() }) .await? 
{ @@ -493,7 +449,7 @@ impl PeerTableHandle { node_id: &H256, sender_ip: IpAddr, ) -> Result { - self.0 + self.handle .call(CallMessage::ValidateContact { node_id: *node_id, sender_ip, @@ -505,7 +461,7 @@ impl PeerTableHandle { /// Get closest nodes according to kademlia's distance pub async fn get_closest_nodes(&mut self, node_id: &H256) -> Result, PeerTableError> { match self - .0 + .handle .call(CallMessage::GetClosestNodes { node_id: *node_id }) .await? { @@ -516,7 +472,7 @@ impl PeerTableHandle { /// Get metadata associated to peer pub async fn get_peers_data(&mut self) -> Result, PeerTableError> { - match self.0.call(CallMessage::GetPeersData).await? { + match self.handle.call(CallMessage::GetPeersData).await? { OutMessage::PeersData(peers_data) => Ok(peers_data), _ => unreachable!(), } @@ -526,9 +482,9 @@ impl PeerTableHandle { pub async fn get_random_peer( &mut self, capabilities: &[Capability], - ) -> Result, PeerTableError> { + ) -> Result, PeerTableError> { match self - .0 + .handle .call(CallMessage::GetRandomPeer { capabilities: capabilities.to_vec(), }) @@ -536,8 +492,8 @@ impl PeerTableHandle { { OutMessage::FoundPeer { node_id, - peer_channels, - } => Ok(Some((node_id, peer_channels))), + connection, + } => Ok(Some((node_id, connection))), OutMessage::NotFound => Ok(None), _ => unreachable!(), } @@ -545,57 +501,45 @@ impl PeerTableHandle { } #[derive(Debug, Default)] -pub struct PeerTable { +struct PeerTableServer { contacts: BTreeMap, peers: BTreeMap, already_tried_peers: HashSet, discarded_contacts: HashSet, } -impl PeerTable { - pub fn spawn() -> PeerTableHandle { - PeerTableHandle(Self::default().start()) - } - +impl PeerTableServer { // Internal functions // - fn get_best_peer(&self, capabilities: &[Capability]) -> Option<(H256, PeerChannels)> { + // Weighting function used to select best peer + // TODO: Review this formula and weight constants. + fn weight_peer(&self, score: &i64, requests: &i64) -> i64 { + score * SCORE_WEIGHT - requests * REQUESTS_WEIGHT + } + + fn get_best_peer(&self, capabilities: &[Capability]) -> Option<(H256, PeerConnection)> { self.peers .iter() // We filter only to those peers which are useful to us .filter_map(|(id, peer_data)| { - // If the peer is already in use right now, we skip it - if peer_data.in_use { - return None; - } - - // if the peer doesn't have any of the capabilities we need, we skip it - if !capabilities - .iter() - .any(|cap| peer_data.supported_capabilities.contains(cap)) + // Skip the peer if it has too many ongoing requests or if it doesn't match + // the capabilities + if peer_data.requests > MAX_CONCURRENT_REQUESTS_PER_PEER + || !capabilities + .iter() + .any(|cap| peer_data.supported_capabilities.contains(cap)) { - return None; - } - - // if the peer doesn't have the channel open, we skip it. - let peer_channel = peer_data.channels.clone()?; + None + } else { + // if the peer doesn't have the channel open, we skip it. + let connection = peer_data.connection.clone()?; - // We return the id, the score and the channel to connect with. - Some((*id, peer_data.score, peer_channel)) + // We return the id, the score and the channel to connect with. + Some((*id, peer_data.score, peer_data.requests, connection)) + } }) - .max_by_key(|(_, score, _)| *score) - .map(|(k, _, v)| (k, v)) - } - - /// Returns the peer with the highest score and its peer channel, and marks it as used, if found. 
- fn use_best_peer(&mut self, capabilities: &[Capability]) -> Option<(H256, PeerChannels)> { - let (peer_id, peer_channel) = self.get_best_peer(capabilities)?; - - self.peers - .entry(peer_id) - .and_modify(|peer_data| peer_data.in_use = true); - - Some((peer_id, peer_channel)) + .max_by_key(|(_, score, reqs, _)| self.weight_peer(score, reqs)) + .map(|(k, _, _, v)| (k, v)) } fn prune(&mut self) { @@ -725,7 +669,10 @@ impl PeerTable { .len() } - fn get_peer_channels(&mut self, capabilities: Vec) -> Vec<(H256, PeerChannels)> { + fn get_peer_connections( + &mut self, + capabilities: Vec, + ) -> Vec<(H256, PeerConnection)> { self.peers .iter() .filter_map(|(peer_id, peer_data)| { @@ -737,15 +684,15 @@ impl PeerTable { return None; } peer_data - .channels + .connection .clone() - .map(|peer_channels| (*peer_id, peer_channels)) + .map(|connection| (*peer_id, connection)) }) .collect() } - fn get_random_peer(&mut self, capabilities: Vec) -> Option<(H256, PeerChannels)> { - let peers: Vec<(H256, PeerChannels)> = self + fn get_random_peer(&mut self, capabilities: Vec) -> Option<(H256, PeerConnection)> { + let peers: Vec<(H256, PeerConnection)> = self .peers .iter() .filter_map(|(node_id, peer_data)| { @@ -757,9 +704,9 @@ impl PeerTable { return None; } peer_data - .channels + .connection .clone() - .map(|peer_channels| (*node_id, peer_channels)) + .map(|connection| (*node_id, connection)) }) .collect(); peers.choose(&mut rand::rngs::OsRng).cloned() @@ -790,26 +737,26 @@ impl PeerTable { } #[derive(Clone, Debug)] -pub enum CastMessage { +enum CastMessage { NewContacts { nodes: Vec, local_node_id: H256, }, NewConnectedPeer { node: Node, - channels: PeerChannels, + connection: PeerConnection, capabilities: Vec, }, RemovePeer { node_id: H256, }, - SetUnwanted { + IncRequests { node_id: H256, }, - MarkInUse { + DecRequests { node_id: H256, }, - FreePeer { + SetUnwanted { node_id: H256, }, RecordSuccess { @@ -818,9 +765,6 @@ pub enum CastMessage { RecordFailure { node_id: H256, }, - FreeWithFailure { - node_id: H256, - }, RecordCriticalFailure { node_id: H256, }, @@ -845,12 +789,11 @@ pub enum CastMessage { } #[derive(Clone, Debug)] -pub enum CallMessage { +enum CallMessage { PeerCount, PeerCountByCapabilities { capabilities: Vec, }, - FreePeers, TargetReached { target_contacts: usize, target_peers: usize, @@ -861,15 +804,12 @@ pub enum CallMessage { GetBestPeer { capabilities: Vec, }, - UseBestPeer { - capabilities: Vec, - }, GetScore { node_id: H256, }, GetConnectedNodes, GetPeersWithCapabilities, - GetPeerChannels { + GetPeerConnections { capabilities: Vec, }, InsertIfNew { @@ -893,12 +833,12 @@ pub enum OutMessage { PeerCount(usize), FoundPeer { node_id: H256, - peer_channels: PeerChannels, + connection: PeerConnection, }, NotFound, PeerScore(i64), - PeersWithCapabilities(Vec<(H256, PeerChannels, Vec)>), - PeerChannels(Vec<(H256, PeerChannels)>), + PeersWithCapabilities(Vec<(H256, PeerConnection, Vec)>), + PeerConnection(Vec<(H256, PeerConnection)>), Contacts(Vec), TargetReached(bool), IsNew(bool), @@ -916,7 +856,7 @@ pub enum PeerTableError { InternalError(#[from] GenServerError), } -impl GenServer for PeerTable { +impl GenServer for PeerTableServer { type CallMsg = CallMessage; type CastMsg = CastMessage; type OutMsg = OutMessage; @@ -925,7 +865,7 @@ impl GenServer for PeerTable { async fn handle_call( &mut self, message: Self::CallMsg, - _handle: &GenServerHandle, + _handle: &GenServerHandle, ) -> CallResponse { match message { CallMessage::PeerCount => { @@ -934,19 +874,6 @@ impl GenServer for 
PeerTable { CallMessage::PeerCountByCapabilities { capabilities } => CallResponse::Reply( OutMessage::PeerCount(self.peer_count_by_capabilities(capabilities)), ), - CallMessage::FreePeers => CallResponse::Reply(Self::OutMsg::PeerCount( - self.peers - .iter_mut() - .filter_map(|(_, peer_data)| { - if peer_data.in_use { - peer_data.in_use = false; - Some(peer_data) - } else { - None - } - }) - .count(), - )), CallMessage::TargetReached { target_contacts, target_peers, @@ -966,19 +893,9 @@ impl GenServer for PeerTable { let channels = self.get_best_peer(&capabilities); CallResponse::Reply(channels.map_or( Self::OutMsg::NotFound, - |(node_id, peer_channels)| Self::OutMsg::FoundPeer { - node_id, - peer_channels, - }, - )) - } - CallMessage::UseBestPeer { capabilities } => { - let channels = self.use_best_peer(&capabilities); - CallResponse::Reply(channels.map_or( - Self::OutMsg::NotFound, - |(node_id, peer_channels)| Self::OutMsg::FoundPeer { + |(node_id, connection)| Self::OutMsg::FoundPeer { node_id, - peer_channels, + connection, }, )) } @@ -999,10 +916,10 @@ impl GenServer for PeerTable { self.peers .iter() .filter_map(|(peer_id, peer_data)| { - peer_data.channels.clone().map(|peer_channels| { + peer_data.connection.clone().map(|connection| { ( *peer_id, - peer_channels, + connection, peer_data.supported_capabilities.clone(), ) }) @@ -1010,8 +927,8 @@ impl GenServer for PeerTable { .collect(), )) } - CallMessage::GetPeerChannels { capabilities } => CallResponse::Reply( - OutMessage::PeerChannels(self.get_peer_channels(capabilities)), + CallMessage::GetPeerConnections { capabilities } => CallResponse::Reply( + OutMessage::PeerConnection(self.get_peer_connections(capabilities)), ), CallMessage::InsertIfNew { node } => CallResponse::Reply(Self::OutMsg::IsNew( match self.contacts.entry(node.node_id()) { @@ -1032,10 +949,10 @@ impl GenServer for PeerTable { self.peers.values().cloned().collect(), )), CallMessage::GetRandomPeer { capabilities } => CallResponse::Reply( - if let Some((node_id, peer_channels)) = self.get_random_peer(capabilities) { + if let Some((node_id, connection)) = self.get_random_peer(capabilities) { OutMessage::FoundPeer { node_id, - peer_channels, + connection, } } else { OutMessage::NotFound @@ -1047,7 +964,7 @@ impl GenServer for PeerTable { async fn handle_cast( &mut self, message: Self::CastMsg, - _handle: &GenServerHandle, + _handle: &GenServerHandle, ) -> CastResponse { match message { CastMessage::NewContacts { @@ -1058,31 +975,31 @@ impl GenServer for PeerTable { } CastMessage::NewConnectedPeer { node, - channels, + connection, capabilities, } => { debug!("New peer connected"); let new_peer_id = node.node_id(); - let new_peer = PeerData::new(node, None, Some(channels), capabilities); + let new_peer = PeerData::new(node, None, Some(connection), capabilities); self.peers.insert(new_peer_id, new_peer); } CastMessage::RemovePeer { node_id } => { self.peers.remove(&node_id); } - CastMessage::SetUnwanted { node_id } => { - self.contacts + CastMessage::IncRequests { node_id } => { + self.peers .entry(node_id) - .and_modify(|contact| contact.unwanted = true); + .and_modify(|peer_data| peer_data.requests += 1); } - CastMessage::MarkInUse { node_id } => { + CastMessage::DecRequests { node_id } => { self.peers .entry(node_id) - .and_modify(|peer_data| peer_data.in_use = true); + .and_modify(|peer_data| peer_data.requests -= 1); } - CastMessage::FreePeer { node_id } => { - self.peers + CastMessage::SetUnwanted { node_id } => { + self.contacts .entry(node_id) - 
.and_modify(|peer_data| peer_data.in_use = false); + .and_modify(|contact| contact.unwanted = true); } CastMessage::RecordSuccess { node_id } => { self.peers @@ -1094,12 +1011,6 @@ impl GenServer for PeerTable { .entry(node_id) .and_modify(|peer_data| peer_data.score = (peer_data.score - 1).max(MIN_SCORE)); } - CastMessage::FreeWithFailure { node_id } => { - self.peers.entry(node_id).and_modify(|peer_data| { - peer_data.in_use = false; - peer_data.score = (peer_data.score - 1).max(MIN_SCORE); - }); - } CastMessage::RecordCriticalFailure { node_id } => { self.peers .entry(node_id) diff --git a/crates/networking/p2p/discv4/server.rs b/crates/networking/p2p/discv4/server.rs index 718feb0efe1..0a897a57119 100644 --- a/crates/networking/p2p/discv4/server.rs +++ b/crates/networking/p2p/discv4/server.rs @@ -5,7 +5,7 @@ use crate::{ ENRResponseMessage, FindNodeMessage, Message, NeighborsMessage, Packet, PacketDecodeErr, PingMessage, PongMessage, }, - peer_table::{Contact, OutMessage as PeerTableOutMessage, PeerTableError, PeerTableHandle}, + peer_table::{Contact, OutMessage as PeerTableOutMessage, PeerTable, PeerTableError}, }, metrics::METRICS, types::{Endpoint, Node, NodeRecord}, @@ -82,7 +82,7 @@ pub struct DiscoveryServer { local_node_record: Arc>, signer: SecretKey, udp_socket: Arc, - peer_table: PeerTableHandle, + peer_table: PeerTable, } impl DiscoveryServer { @@ -90,7 +90,7 @@ impl DiscoveryServer { local_node: Node, signer: SecretKey, udp_socket: Arc, - mut peer_table: PeerTableHandle, + mut peer_table: PeerTable, bootnodes: Vec, ) -> Result<(), DiscoveryServerError> { info!("Starting Discovery Server"); diff --git a/crates/networking/p2p/metrics.rs b/crates/networking/p2p/metrics.rs index ff083436996..c17af704654 100644 --- a/crates/networking/p2p/metrics.rs +++ b/crates/networking/p2p/metrics.rs @@ -12,7 +12,7 @@ use ethrex_common::H256; use prometheus::{Gauge, IntCounter, Registry}; use tokio::sync::Mutex; -use crate::rlpx::{error::RLPxError, p2p::DisconnectReason}; +use crate::rlpx::{error::PeerConnectionError, p2p::DisconnectReason}; pub static METRICS: LazyLock = LazyLock::new(Metrics::default); @@ -279,7 +279,7 @@ impl Metrics { .and_modify(|count| *count -= 1); } - pub async fn record_new_rlpx_conn_failure(&self, reason: RLPxError) { + pub async fn record_new_rlpx_conn_failure(&self, reason: PeerConnectionError) { let mut failures_grouped_by_reason = self.connection_attempt_failures.lock().await; self.update_failures_grouped_by_reason(&mut failures_grouped_by_reason, &reason) @@ -326,189 +326,207 @@ impl Metrics { pub async fn update_failures_grouped_by_reason( &self, failures_grouped_by_reason: &mut BTreeMap, - failure_reason: &RLPxError, + failure_reason: &PeerConnectionError, ) { match failure_reason { - RLPxError::HandshakeError(reason) => { + PeerConnectionError::HandshakeError(reason) => { failures_grouped_by_reason .entry(format!("HandshakeError - {reason}")) .and_modify(|e| *e += 1) .or_insert(1); } - RLPxError::StateError(reason) => { + PeerConnectionError::StateError(reason) => { failures_grouped_by_reason .entry(format!("StateError - {reason}")) .and_modify(|e| *e += 1) .or_insert(1); } - RLPxError::NoMatchingCapabilities() => { + PeerConnectionError::NoMatchingCapabilities() => { failures_grouped_by_reason .entry("NoMatchingCapabilities".to_owned()) .and_modify(|e| *e += 1) .or_insert(1); } - RLPxError::Disconnected() => { + PeerConnectionError::Disconnected => { failures_grouped_by_reason .entry("Disconnected".to_owned()) .and_modify(|e| *e += 1) .or_insert(1); } - 
RLPxError::DisconnectReceived(disconnect_reason) => { + PeerConnectionError::DisconnectReceived(disconnect_reason) => { failures_grouped_by_reason .entry(format!("DisconnectReceived - {disconnect_reason}")) .and_modify(|e| *e += 1) .or_insert(1); } - RLPxError::DisconnectSent(disconnect_reason) => { + PeerConnectionError::DisconnectSent(disconnect_reason) => { failures_grouped_by_reason .entry(format!("DisconnectSent - {disconnect_reason}")) .and_modify(|e| *e += 1) .or_insert(1); } - RLPxError::NotFound(reason) => { + PeerConnectionError::NotFound(reason) => { failures_grouped_by_reason .entry(format!("NotFound - {reason}")) .and_modify(|e| *e += 1) .or_insert(1); } - RLPxError::InvalidPeerId() => { + PeerConnectionError::InvalidPeerId() => { failures_grouped_by_reason .entry("InvalidPeerId".to_owned()) .and_modify(|e| *e += 1) .or_insert(1); } - RLPxError::InvalidRecoveryId() => { + PeerConnectionError::InvalidRecoveryId() => { failures_grouped_by_reason .entry("InvalidRecoveryId".to_owned()) .and_modify(|e| *e += 1) .or_insert(1); } - RLPxError::InvalidMessageLength() => { + PeerConnectionError::InvalidMessageLength() => { failures_grouped_by_reason .entry("InvalidMessageLength".to_owned()) .and_modify(|e| *e += 1) .or_insert(1); } - RLPxError::MessageNotHandled(reason) => { + PeerConnectionError::ExpectedRequestId(reason) => { + failures_grouped_by_reason + .entry(format!("ExpectedRequestId - {reason}")) + .and_modify(|e| *e += 1) + .or_insert(1); + } + PeerConnectionError::MessageNotHandled(reason) => { failures_grouped_by_reason .entry(format!("MessageNotHandled - {reason}")) .and_modify(|e| *e += 1) .or_insert(1); } - RLPxError::BadRequest(reason) => { + PeerConnectionError::BadRequest(reason) => { failures_grouped_by_reason .entry(format!("BadRequest - {reason}")) .and_modify(|e| *e += 1) .or_insert(1); } - RLPxError::RLPDecodeError(rlpdecode_error) => { + PeerConnectionError::RLPDecodeError(rlpdecode_error) => { failures_grouped_by_reason .entry(format!("RLPDecodeError - {rlpdecode_error}")) .and_modify(|e| *e += 1) .or_insert(1); } - RLPxError::RLPEncodeError(rlpencode_error) => { + PeerConnectionError::RLPEncodeError(rlpencode_error) => { failures_grouped_by_reason .entry(format!("RLPEncodeError - {rlpencode_error}")) .and_modify(|e| *e += 1) .or_insert(1); } - RLPxError::StoreError(store_error) => { + PeerConnectionError::StoreError(store_error) => { failures_grouped_by_reason .entry(format!("StoreError - {store_error}")) .and_modify(|e| *e += 1) .or_insert(1); } - RLPxError::CryptographyError(reason) => { + PeerConnectionError::CryptographyError(reason) => { failures_grouped_by_reason .entry(format!("CryptographyError - {reason}")) .and_modify(|e| *e += 1) .or_insert(1); } - RLPxError::BroadcastError(reason) => { + PeerConnectionError::BroadcastError(reason) => { failures_grouped_by_reason .entry(format!("BroadcastError - {reason}")) .and_modify(|e| *e += 1) .or_insert(1); } - RLPxError::RecvError(recv_error) => { + PeerConnectionError::RecvError(recv_error) => { failures_grouped_by_reason .entry(format!("RecvError - {recv_error}")) .and_modify(|e| *e += 1) .or_insert(1); } - RLPxError::SendMessage(reason) => { + PeerConnectionError::SendMessage(reason) => { failures_grouped_by_reason .entry(format!("SendMessage - {reason}")) .and_modify(|e| *e += 1) .or_insert(1); } - RLPxError::MempoolError(mempool_error) => { + PeerConnectionError::MempoolError(mempool_error) => { failures_grouped_by_reason .entry(format!("MempoolError - {mempool_error}")) .and_modify(|e| *e += 1) 
.or_insert(1); } - RLPxError::IoError(error) => { + PeerConnectionError::IoError(error) => { failures_grouped_by_reason .entry(format!("IoError - {error}")) .and_modify(|e| *e += 1) .or_insert(1); } - RLPxError::InvalidMessageFrame(reason) => { + PeerConnectionError::InvalidMessageFrame(reason) => { failures_grouped_by_reason .entry(format!("InvalidMessageFrame - {reason}")) .and_modify(|e| *e += 1) .or_insert(1); } - RLPxError::IncompatibleProtocol => { + PeerConnectionError::IncompatibleProtocol => { failures_grouped_by_reason .entry("IncompatibleProtocol".to_owned()) .and_modify(|e| *e += 1) .or_insert(1); } - RLPxError::InvalidBlockRange => { + PeerConnectionError::InvalidBlockRange => { failures_grouped_by_reason .entry("InvalidBlockRange".to_owned()) .and_modify(|e| *e += 1) .or_insert(1); } - RLPxError::RollupStoreError(error) => { + PeerConnectionError::RollupStoreError(error) => { failures_grouped_by_reason .entry(format!("RollupStoreError - {error}")) .and_modify(|e| *e += 1) .or_insert(1); } - RLPxError::BlockchainError(error) => { + PeerConnectionError::BlockchainError(error) => { failures_grouped_by_reason .entry(format!("BlockchainError - {error}")) .and_modify(|e| *e += 1) .or_insert(1); } - RLPxError::InternalError(error) => { + PeerConnectionError::InternalError(error) => { failures_grouped_by_reason .entry(format!("InternalError - {error}")) .and_modify(|e| *e += 1) .or_insert(1); } - RLPxError::L2CapabilityNotNegotiated => { + PeerConnectionError::L2CapabilityNotNegotiated => { failures_grouped_by_reason .entry("L2CapabilityNotNegotiated".to_owned()) .and_modify(|e| *e += 1) .or_insert(1); } - RLPxError::InvalidBlockRangeUpdate => { + PeerConnectionError::InvalidBlockRangeUpdate => { failures_grouped_by_reason .entry("InvalidBlockRangeUpdate".to_owned()) .and_modify(|e| *e += 1) .or_insert(1); } - RLPxError::PeerTableError(error) => { + PeerConnectionError::PeerTableError(error) => { failures_grouped_by_reason .entry(format!("InternalError - {error}")) .and_modify(|e| *e += 1) .or_insert(1); } + PeerConnectionError::Timeout => { + failures_grouped_by_reason + .entry("Timeout".to_owned()) + .and_modify(|e| *e += 1) + .or_insert(1); + } + PeerConnectionError::UnexpectedResponse(_, _) => { + failures_grouped_by_reason + .entry("UnexpectedResponse".to_owned()) + .and_modify(|e| *e += 1) + .or_insert(1); + } } } } diff --git a/crates/networking/p2p/network.rs b/crates/networking/p2p/network.rs index 1ef70c87ae9..45affd8f231 100644 --- a/crates/networking/p2p/network.rs +++ b/crates/networking/p2p/network.rs @@ -1,11 +1,11 @@ use crate::{ discv4::{ - peer_table::{PeerData, PeerTableHandle}, + peer_table::{PeerData, PeerTable}, server::{DiscoveryServer, DiscoveryServerError}, }, metrics::METRICS, rlpx::{ - connection::server::{RLPxConnBroadcastSender, RLPxConnection}, + connection::server::{PeerConnBroadcastSender, PeerConnection}, initiator::RLPxInitiator, l2::l2_connection::P2PBasedContext, message::Message, @@ -37,10 +37,10 @@ pub const MAX_MESSAGES_TO_BROADCAST: usize = 100000; pub struct P2PContext { pub tracker: TaskTracker, pub signer: SecretKey, - pub table: PeerTableHandle, + pub table: PeerTable, pub storage: Store, pub blockchain: Arc, - pub(crate) broadcast: RLPxConnBroadcastSender, + pub(crate) broadcast: PeerConnBroadcastSender, pub local_node: Node, pub local_node_record: Arc>, pub client_version: String, @@ -55,7 +55,7 @@ impl P2PContext { local_node_record: Arc>, tracker: TaskTracker, signer: SecretKey, - peer_table: PeerTableHandle, + peer_table: PeerTable, 
storage: Store, blockchain: Arc, client_version: String, @@ -145,7 +145,7 @@ pub(crate) async fn serve_p2p_requests(context: P2PContext) { continue; } - let _ = RLPxConnection::spawn_as_receiver(context.clone(), peer_addr, stream).await; + let _ = PeerConnection::spawn_as_receiver(context.clone(), peer_addr, stream).await; } } @@ -158,17 +158,14 @@ fn listener(tcp_addr: SocketAddr) -> Result { tcp_socket.listen(50) } -pub async fn periodically_show_peer_stats( - blockchain: Arc, - mut peer_table: PeerTableHandle, -) { +pub async fn periodically_show_peer_stats(blockchain: Arc, mut peer_table: PeerTable) { periodically_show_peer_stats_during_syncing(blockchain, &mut peer_table).await; periodically_show_peer_stats_after_sync(&mut peer_table).await; } pub async fn periodically_show_peer_stats_during_syncing( blockchain: Arc, - peer_table: &mut PeerTableHandle, + peer_table: &mut PeerTable, ) { let start = std::time::Instant::now(); loop { @@ -367,7 +364,7 @@ bytecodes progress: downloaded: {bytecodes_downloaded}, elapsed: {bytecodes_down } /// Shows the amount of connected peers, active peers, and peers suitable for snap sync on a set interval -pub async fn periodically_show_peer_stats_after_sync(peer_table: &mut PeerTableHandle) { +pub async fn periodically_show_peer_stats_after_sync(peer_table: &mut PeerTable) { const INTERVAL_DURATION: tokio::time::Duration = tokio::time::Duration::from_secs(60); let mut interval = tokio::time::interval(INTERVAL_DURATION); loop { @@ -375,12 +372,12 @@ pub async fn periodically_show_peer_stats_after_sync(peer_table: &mut PeerTableH let peers: Vec = peer_table.get_peers_data().await.unwrap_or(Vec::new()); let active_peers = peers .iter() - .filter(|peer| -> bool { peer.channels.as_ref().is_some() }) + .filter(|peer| -> bool { peer.connection.as_ref().is_some() }) .count(); let snap_active_peers = peers .iter() .filter(|peer| -> bool { - peer.channels.as_ref().is_some() + peer.connection.as_ref().is_some() && SUPPORTED_SNAP_CAPABILITIES .iter() .any(|cap| peer.supported_capabilities.contains(cap)) diff --git a/crates/networking/p2p/peer_handler.rs b/crates/networking/p2p/peer_handler.rs index 431cfc6e093..0dda9cb1b48 100644 --- a/crates/networking/p2p/peer_handler.rs +++ b/crates/networking/p2p/peer_handler.rs @@ -1,26 +1,9 @@ -use std::{ - collections::{BTreeMap, HashMap, HashSet, VecDeque}, - io::ErrorKind, - path::{Path, PathBuf}, - sync::atomic::Ordering, - time::{Duration, SystemTime}, -}; - -use bytes::Bytes; -use ethrex_common::{ - BigEndianHash, H256, U256, - types::{AccountState, BlockBody, BlockHeader, Receipt, validate_block_body}, -}; -use ethrex_rlp::encode::RLPEncode; -use ethrex_storage::Store; -use ethrex_trie::Nibbles; -use ethrex_trie::{Node, verify_range}; - use crate::{ - discv4::peer_table::{PeerChannels, PeerData, PeerTable, PeerTableError, PeerTableHandle}, + discv4::peer_table::{PeerData, PeerTable, PeerTableError}, metrics::{CurrentStepValue, METRICS}, rlpx::{ - connection::server::CastMessage, + connection::server::PeerConnection, + error::PeerConnectionError, eth::{ blocks::{ BLOCK_HEADER_LIMIT, BlockBodies, BlockHeaders, GetBlockBodies, GetBlockHeaders, @@ -38,10 +21,26 @@ use crate::{ snap::encodable_to_proof, sync::{AccountStorageRoots, BlockSyncState, block_is_stale, update_pivot}, utils::{ - AccountsWithStorage, SendMessageError, dump_accounts_to_file, dump_storages_to_file, + AccountsWithStorage, dump_accounts_to_file, dump_storages_to_file, get_account_state_snapshot_file, get_account_storages_snapshot_file, }, }; +use 
bytes::Bytes; +use ethrex_common::{ + BigEndianHash, H256, U256, + types::{AccountState, BlockBody, BlockHeader, Receipt, validate_block_body}, +}; +use ethrex_rlp::encode::RLPEncode; +use ethrex_storage::Store; +use ethrex_trie::Nibbles; +use ethrex_trie::{Node, verify_range}; +use std::{ + collections::{BTreeMap, HashMap, HashSet, VecDeque}, + io::ErrorKind, + path::{Path, PathBuf}, + sync::atomic::Ordering, + time::{Duration, SystemTime}, +}; use tracing::{debug, error, info, trace, warn}; pub const PEER_REPLY_TIMEOUT: Duration = Duration::from_secs(15); pub const PEER_SELECT_RETRY_ATTEMPTS: u32 = 3; @@ -68,7 +67,7 @@ pub const MAX_BLOCK_BODIES_TO_REQUEST: usize = 128; /// An abstraction over the [Kademlia] containing logic to make requests to peers #[derive(Debug, Clone)] pub struct PeerHandler { - pub peer_table: PeerTableHandle, + pub peer_table: PeerTable, } pub enum BlockRequestOrder { @@ -96,7 +95,8 @@ struct StorageTask { async fn ask_peer_head_number( peer_id: H256, - peer_channel: &mut PeerChannels, + connection: &mut PeerConnection, + peer_table: &mut PeerTable, sync_head: H256, retries: i32, ) -> Result { @@ -111,21 +111,16 @@ async fn ask_peer_head_number( reverse: false, }); - peer_channel - .connection - .cast(CastMessage::BackendMessage(request.clone())) - .await - .map_err(|e| PeerHandlerError::SendMessageToPeer(e.to_string()))?; - debug!("(Retry {retries}) Requesting sync head {sync_head:?} to peer {peer_id}"); - match tokio::time::timeout(Duration::from_millis(500), async move { - peer_channel.receiver.lock().await.recv().await - }) - .await + match PeerHandler::make_request(peer_table, peer_id, connection, request, PEER_REPLY_TIMEOUT) + .await { - Ok(Some(RLPxMessage::BlockHeaders(BlockHeaders { id, block_headers }))) => { - if id == request_id && !block_headers.is_empty() { + Ok(RLPxMessage::BlockHeaders(BlockHeaders { + id: _, + block_headers, + })) => { + if !block_headers.is_empty() { let sync_head_number = block_headers .last() .ok_or(PeerHandlerError::BlockHeaders)? 
@@ -135,17 +130,19 @@ async fn ask_peer_head_number( ); Ok(sync_head_number) } else { - Err(PeerHandlerError::UnexpectedResponseFromPeer(peer_id)) + Err(PeerHandlerError::EmptyResponseFromPeer(peer_id)) } } - Ok(None) => Err(PeerHandlerError::ReceiveMessageFromPeer(peer_id)), Ok(_other_msgs) => Err(PeerHandlerError::UnexpectedResponseFromPeer(peer_id)), - Err(_err) => Err(PeerHandlerError::ReceiveMessageFromPeerTimeout(peer_id)), + Err(PeerConnectionError::Timeout) => { + Err(PeerHandlerError::ReceiveMessageFromPeerTimeout(peer_id)) + } + Err(_other_err) => Err(PeerHandlerError::ReceiveMessageFromPeer(peer_id)), } } impl PeerHandler { - pub fn new(peer_table: PeerTableHandle) -> PeerHandler { + pub fn new(peer_table: PeerTable) -> PeerHandler { Self { peer_table } } @@ -156,12 +153,27 @@ impl PeerHandler { PeerHandler::new(dummy_peer_table) } + async fn make_request( + // TODO: We should receive the PeerHandler (or self) instead, but since it is not yet spawnified it cannot be shared + // Fix this to avoid passing the PeerTable as a parameter + peer_table: &mut PeerTable, + peer_id: H256, + connection: &mut PeerConnection, + message: RLPxMessage, + timeout: Duration, + ) -> Result { + peer_table.inc_requests(peer_id).await?; + let result = connection.outgoing_request(message, timeout).await; + peer_table.dec_requests(peer_id).await?; + result + } + /// Returns a random node id and the channel ends to an active peer connection that supports the given capability /// It doesn't guarantee that the selected peer is not currently busy async fn get_random_peer( &mut self, capabilities: &[Capability], - ) -> Result, PeerHandlerError> { + ) -> Result, PeerHandlerError> { return Ok(self.peer_table.get_random_peer(capabilities).await?); } @@ -196,13 +208,21 @@ impl PeerHandler { // sync_head might be invalid return Ok(None); } - let peer_channels = self + let peer_connection = self .peer_table - .get_peer_channels(&SUPPORTED_ETH_CAPABILITIES) + .get_peer_connections(&SUPPORTED_ETH_CAPABILITIES) .await?; - for (peer_id, mut peer_channel) in peer_channels { - match ask_peer_head_number(peer_id, &mut peer_channel, sync_head, retries).await { + for (peer_id, mut connection) in peer_connection { + match ask_peer_head_number( + peer_id, + &mut connection, + &mut self.peer_table, + sync_head, + retries, + ) + .await + { Ok(number) => { sync_head_number = number; if number != 0 { @@ -260,7 +280,7 @@ impl PeerHandler { // channel to send the tasks to the peers let (task_sender, mut task_receiver) = - tokio::sync::mpsc::channel::<(Vec, H256, PeerChannels, u64, u64)>(1000); + tokio::sync::mpsc::channel::<(Vec, H256, PeerConnection, u64, u64)>(1000); let mut current_show = 0; @@ -271,12 +291,12 @@ impl PeerHandler { *METRICS.headers_download_start_time.lock().await = Some(SystemTime::now()); loop { - if let Ok((headers, peer_id, _peer_channel, startblock, previous_chunk_limit)) = + if let Ok((headers, peer_id, _connection, startblock, previous_chunk_limit)) = task_receiver.try_recv() { trace!("We received a download chunk from peer"); if headers.is_empty() { - self.peer_table.free_with_failure(&peer_id).await?; + self.peer_table.record_failure(&peer_id).await?; debug!("Failed to download chunk from peer. 
Downloader {peer_id} freed"); @@ -321,12 +341,11 @@ impl PeerHandler { } self.peer_table.record_success(&peer_id).await?; - self.peer_table.free_peer(&peer_id).await?; debug!("Downloader {peer_id} freed"); } - let Some((peer_id, mut peer_channel)) = self + let Some((peer_id, mut connection)) = self .peer_table - .use_best_peer(&SUPPORTED_ETH_CAPABILITIES) + .get_best_peer(&SUPPORTED_ETH_CAPABILITIES) .await? else { trace!("We didn't get a peer from the table"); @@ -334,7 +353,6 @@ impl PeerHandler { }; let Some((startblock, chunk_limit)) = tasks_queue_not_started.pop_front() else { - self.peer_table.free_peer(&peer_id).await?; if downloaded_count >= block_count { info!("All headers downloaded successfully"); break; @@ -348,10 +366,9 @@ impl PeerHandler { continue; }; - let tx = task_sender.clone(); - debug!("Downloader {peer_id} is now busy"); + let mut peer_table = self.peer_table.clone(); // run download_chunk_from_peer in a different Tokio task tokio::spawn(async move { @@ -360,7 +377,8 @@ impl PeerHandler { ); let headers = Self::download_chunk_from_peer( peer_id, - &mut peer_channel, + &mut connection, + &mut peer_table, startblock, chunk_limit, ) @@ -368,7 +386,7 @@ impl PeerHandler { .inspect_err(|err| trace!("Sync Log 6: {peer_id} failed to download chunk: {err}")) .unwrap_or_default(); - tx.send((headers, peer_id, peer_channel, startblock, chunk_limit)) + tx.send((headers, peer_id, connection, startblock, chunk_limit)) .await .inspect_err(|err| { error!("Failed to send headers result through channel. Error: {err}") @@ -439,36 +457,18 @@ impl PeerHandler { }); match self.get_random_peer(&SUPPORTED_ETH_CAPABILITIES).await? { None => return Ok(None), - Some((peer_id, mut peer_channel)) => { - let mut receiver = peer_channel.receiver.lock().await; - if let Err(err) = peer_channel - .connection - .cast(CastMessage::BackendMessage(request)) - .await - { - debug!("Failed to send message to peer: {err:?}"); - continue; - } - if let Some(block_headers) = - tokio::time::timeout(PEER_REPLY_TIMEOUT, async move { - loop { - match receiver.recv().await { - Some(RLPxMessage::BlockHeaders(BlockHeaders { - id, - block_headers, - })) if id == request_id => { - return Some(block_headers); - } - // Ignore replies that don't match the expected id (such as late responses) - Some(_) => continue, - None => return None, // Retry request - } - } - }) - .await - .ok() - .flatten() - .and_then(|headers| (!headers.is_empty()).then_some(headers)) + Some((peer_id, mut connection)) => { + if let Ok(RLPxMessage::BlockHeaders(BlockHeaders { + id: _, + block_headers, + })) = PeerHandler::make_request( + &mut self.peer_table, + peer_id, + &mut connection, + request, + PEER_REPLY_TIMEOUT, + ) + .await { if are_block_headers_chained(&block_headers, &order) { return Ok(Some(block_headers)); @@ -478,6 +478,7 @@ impl PeerHandler { ); } } + // Timeouted warn!( "[SYNCING] Didn't receive block headers from peer, penalizing peer {peer_id}..." ); @@ -492,7 +493,8 @@ impl PeerHandler { /// If it fails, returns an error message. async fn download_chunk_from_peer( peer_id: H256, - peer_channel: &mut PeerChannels, + connection: &mut PeerConnection, + peer_table: &mut PeerTable, startblock: u64, chunk_limit: u64, ) -> Result, PeerHandlerError> { @@ -505,38 +507,21 @@ impl PeerHandler { skip: 0, reverse: false, }); - let mut receiver = peer_channel.receiver.lock().await; - - // FIXME! 
modify the cast and wait for a `call` version - peer_channel - .connection - .cast(CastMessage::BackendMessage(request)) - .await - .map_err(|e| PeerHandlerError::SendMessageToPeer(e.to_string()))?; - - let block_headers = tokio::time::timeout(Duration::from_secs(2), async move { - loop { - match receiver.recv().await { - Some(RLPxMessage::BlockHeaders(BlockHeaders { id, block_headers })) - if id == request_id => - { - return Some(block_headers); - } - // Ignore replies that don't match the expected id (such as late responses) - Some(_) => continue, - None => return None, // EOF - } + if let Ok(RLPxMessage::BlockHeaders(BlockHeaders { + id: _, + block_headers, + })) = + PeerHandler::make_request(peer_table, peer_id, connection, request, PEER_REPLY_TIMEOUT) + .await + { + if are_block_headers_chained(&block_headers, &BlockRequestOrder::OldToNew) { + Ok(block_headers) + } else { + warn!("[SYNCING] Received invalid headers from peer: {peer_id}"); + Err(PeerHandlerError::InvalidHeaders) } - }) - .await - .map_err(|_| PeerHandlerError::BlockHeaders)? - .ok_or(PeerHandlerError::BlockHeaders)?; - - if are_block_headers_chained(&block_headers, &BlockRequestOrder::OldToNew) { - Ok(block_headers) } else { - warn!("[SYNCING] Received invalid headers from peer: {peer_id}"); - Err(PeerHandlerError::InvalidHeaders) + Err(PeerHandlerError::BlockHeaders) } } @@ -556,45 +541,25 @@ impl PeerHandler { }); match self.get_random_peer(&SUPPORTED_ETH_CAPABILITIES).await? { None => Ok(None), - Some((peer_id, mut peer_channel)) => { - let mut receiver = peer_channel.receiver.lock().await; - if let Err(err) = peer_channel - .connection - .cast(CastMessage::BackendMessage(request)) - .await - { - self.peer_table.record_failure(&peer_id).await?; - debug!("Failed to send message to peer: {err:?}"); - return Ok(None); - } - if let Some(block_bodies) = - tokio::time::timeout(Duration::from_secs(2), async move { - loop { - match receiver.recv().await { - Some(RLPxMessage::BlockBodies(BlockBodies { - id, - block_bodies, - })) if id == request_id => { - return Some(block_bodies); - } - // Ignore replies that don't match the expected id (such as late responses) - Some(_) => continue, - None => return None, - } - } - }) - .await - .ok() - .flatten() - .and_then(|bodies| { - // Check that the response is not empty and does not contain more bodies than the ones requested - (!bodies.is_empty() && bodies.len() <= block_hashes_len).then_some(bodies) - }) + Some((peer_id, mut connection)) => { + if let Ok(RLPxMessage::BlockBodies(BlockBodies { + id: _, + block_bodies, + })) = PeerHandler::make_request( + &mut self.peer_table, + peer_id, + &mut connection, + request, + PEER_REPLY_TIMEOUT, + ) + .await { - self.peer_table.record_success(&peer_id).await?; - return Ok(Some((block_bodies, peer_id))); + // Check that the response is not empty and does not contain more bodies than the ones requested + if !block_bodies.is_empty() && block_bodies.len() <= block_hashes_len { + self.peer_table.record_success(&peer_id).await?; + return Ok(Some((block_bodies, peer_id))); + } } - warn!( "[SYNCING] Didn't receive block bodies from peer, penalizing peer {peer_id}..." ); @@ -675,40 +640,17 @@ impl PeerHandler { }); match self.get_random_peer(&SUPPORTED_ETH_CAPABILITIES).await? 
{ None => return Ok(None), - Some((_, mut peer_channel)) => { - let mut receiver = peer_channel.receiver.lock().await; - if let Err(err) = peer_channel - .connection - .cast(CastMessage::BackendMessage(request)) - .await - { - debug!("Failed to send message to peer: {err:?}"); - continue; - } - if let Some(receipts) = tokio::time::timeout(PEER_REPLY_TIMEOUT, async move { - loop { - match receiver.recv().await { - Some(RLPxMessage::Receipts68(res)) => { - if res.get_id() == request_id { - return Some(res.get_receipts()); - } - return None; - } - Some(RLPxMessage::Receipts69(res)) => { - if res.get_id() == request_id { - return Some(res.receipts.clone()); - } - return None; - } - // Ignore replies that don't match the expected id (such as late responses) - Some(_) => continue, - None => return None, + Some((peer_id, mut connection)) => { + if let Some(receipts) = + match PeerHandler::make_request(&mut self.peer_table, peer_id, &mut connection, request, PEER_REPLY_TIMEOUT).await { + Ok(RLPxMessage::Receipts68(res)) => { + Some(res.get_receipts()) + } + Ok(RLPxMessage::Receipts69(res)) => { + Some(res.receipts.clone()) } + _ => None } - }) - .await - .ok() - .flatten() .and_then(|receipts| // Check that the response is not empty and does not contain more bodies than the ones requested (!receipts.is_empty() && receipts.len() <= block_hashes_len).then_some(receipts)) @@ -826,8 +768,6 @@ impl PeerHandler { } if let Ok((accounts, peer_id, chunk_start_end)) = task_receiver.try_recv() { - self.peer_table.free_peer(&peer_id).await?; - if let Some((chunk_start, chunk_end)) = chunk_start_end { if chunk_start <= chunk_end { tasks_queue_not_started.push_back((chunk_start, chunk_end)); @@ -859,9 +799,9 @@ impl PeerHandler { ); } - let Some((peer_id, peer_channel)) = self + let Some((peer_id, connection)) = self .peer_table - .use_best_peer(&SUPPORTED_ETH_CAPABILITIES) + .get_best_peer(&SUPPORTED_ETH_CAPABILITIES) .await .inspect_err(|err| error!(err= ?err, "Error requesting a peer for account range")) .unwrap_or(None) @@ -871,7 +811,6 @@ impl PeerHandler { }; let Some((chunk_start, chunk_end)) = tasks_queue_not_started.pop_front() else { - self.peer_table.free_peer(&peer_id).await?; if completed_tasks >= chunk_count { info!("All account ranges downloaded successfully"); break; @@ -893,12 +832,15 @@ impl PeerHandler { .expect("Should be able to update pivot") } + let peer_table = self.peer_table.clone(); + tokio::spawn(PeerHandler::request_account_range_worker( peer_id, + connection, + peer_table, chunk_start, chunk_end, pivot_header.state_root, - peer_channel, tx, )); } @@ -948,15 +890,16 @@ impl PeerHandler { #[allow(clippy::type_complexity)] async fn request_account_range_worker( - free_peer_id: H256, + peer_id: H256, + mut connection: PeerConnection, + mut peer_table: PeerTable, chunk_start: H256, chunk_end: H256, state_root: H256, - mut free_downloader_channels_clone: PeerChannels, tx: tokio::sync::mpsc::Sender<(Vec, H256, Option<(H256, H256)>)>, ) -> Result<(), PeerHandlerError> { debug!( - "Requesting account range from peer {free_peer_id}, chunk: {chunk_start:?} - {chunk_end:?}" + "Requesting account range from peer {peer_id}, chunk: {chunk_start:?} - {chunk_end:?}" ); let request_id = rand::random(); let request = RLPxMessage::GetAccountRange(GetAccountRange { @@ -966,36 +909,21 @@ impl PeerHandler { limit_hash: chunk_end, response_bytes: MAX_RESPONSE_BYTES, }); - let mut receiver = free_downloader_channels_clone.receiver.lock().await; - if let Err(err) = 
(free_downloader_channels_clone.connection) - .cast(CastMessage::BackendMessage(request)) - .await - { - error!("Failed to send message to peer: {err:?}"); - tx.send((Vec::new(), free_peer_id, Some((chunk_start, chunk_end)))) - .await - .ok(); - return Ok(()); - } - if let Some((accounts, proof)) = tokio::time::timeout(Duration::from_secs(2), async move { - loop { - if let RLPxMessage::AccountRange(AccountRange { - id, - accounts, - proof, - }) = receiver.recv().await? - && id == request_id - { - return Some((accounts, proof)); - } - } - }) + if let Ok(RLPxMessage::AccountRange(AccountRange { + id: _, + accounts, + proof, + })) = PeerHandler::make_request( + &mut peer_table, + peer_id, + &mut connection, + request, + PEER_REPLY_TIMEOUT, + ) .await - .ok() - .flatten() { if accounts.is_empty() { - tx.send((Vec::new(), free_peer_id, Some((chunk_start, chunk_end)))) + tx.send((Vec::new(), peer_id, Some((chunk_start, chunk_end)))) .await .ok(); return Ok(()); @@ -1019,7 +947,7 @@ impl PeerHandler { &encoded_accounts, &proof, ) else { - tx.send((Vec::new(), free_peer_id, Some((chunk_start, chunk_end)))) + tx.send((Vec::new(), peer_id, Some((chunk_start, chunk_end)))) .await .ok(); tracing::error!("Received invalid account range"); @@ -1031,7 +959,7 @@ impl PeerHandler { let last_hash = match account_hashes.last() { Some(last_hash) => last_hash, None => { - tx.send((Vec::new(), free_peer_id, Some((chunk_start, chunk_end)))) + tx.send((Vec::new(), peer_id, Some((chunk_start, chunk_end)))) .await .ok(); error!("Account hashes last failed, this shouldn't happen"); @@ -1049,14 +977,14 @@ impl PeerHandler { .into_iter() .filter(|unit| unit.hash <= chunk_end) .collect(), - free_peer_id, + peer_id, chunk_left, )) .await .ok(); } else { tracing::debug!("Failed to get account range"); - tx.send((Vec::new(), free_peer_id, Some((chunk_start, chunk_end)))) + tx.send((Vec::new(), peer_id, Some((chunk_start, chunk_end)))) .await .ok(); } @@ -1125,7 +1053,6 @@ impl PeerHandler { remaining_start, remaining_end, } = result; - self.peer_table.free_peer(&peer_id).await?; debug!( "Downloaded {} bytecodes from peer {peer_id} (current count: {downloaded_count})", @@ -1150,16 +1077,15 @@ impl PeerHandler { } } - let Some((peer_id, mut peer_channel)) = self + let Some((peer_id, mut connection)) = self .peer_table - .use_best_peer(&SUPPORTED_ETH_CAPABILITIES) + .get_best_peer(&SUPPORTED_ETH_CAPABILITIES) .await? 
else { continue; }; let Some((chunk_start, chunk_end)) = tasks_queue_not_started.pop_front() else { - self.peer_table.free_peer(&peer_id).await?; if completed_tasks >= chunk_count { info!("All bytecodes downloaded successfully"); break; @@ -1176,6 +1102,8 @@ impl PeerHandler { .copied() .collect(); + let mut peer_table = self.peer_table.clone(); + tokio::spawn(async move { let empty_task_result = TaskResult { start_index: chunk_start, @@ -1193,31 +1121,15 @@ impl PeerHandler { hashes: hashes_to_request.clone(), bytes: MAX_RESPONSE_BYTES, }); - let mut receiver = peer_channel.receiver.lock().await; - if let Err(err) = (peer_channel.connection) - .cast(CastMessage::BackendMessage(request)) + if let Ok(RLPxMessage::ByteCodes(ByteCodes { id: _, codes })) = + PeerHandler::make_request( + &mut peer_table, + peer_id, + &mut connection, + request, + PEER_REPLY_TIMEOUT, + ) .await - { - error!("Failed to send message to peer: {err:?}"); - tx.send(empty_task_result).await.ok(); - return; - } - if let Some(codes) = tokio::time::timeout(Duration::from_secs(2), async move { - loop { - match receiver.recv().await { - Some(RLPxMessage::ByteCodes(ByteCodes { id, codes })) - if id == request_id => - { - return Some(codes); - } - Some(_) => continue, - None => return None, - } - } - }) - .await - .ok() - .flatten() { if codes.is_empty() { tx.send(empty_task_result).await.ok(); @@ -1392,8 +1304,6 @@ impl PeerHandler { } = result; completed_tasks += 1; - self.peer_table.free_peer(&peer_id).await?; - for (_, accounts) in accounts_by_root_hash[start_index..remaining_start].iter() { for account in accounts { if !accounts_done.contains_key(account) { @@ -1665,16 +1575,15 @@ impl PeerHandler { break; } - let Some((peer_id, peer_channel)) = self + let Some((peer_id, connection)) = self .peer_table - .use_best_peer(&SUPPORTED_ETH_CAPABILITIES) + .get_best_peer(&SUPPORTED_ETH_CAPABILITIES) .await? 
else { continue; }; let Some(task) = tasks_queue_not_started.pop_front() else { - self.peer_table.free_peer(&peer_id).await?; if completed_tasks >= task_count { break; } @@ -1697,12 +1606,14 @@ impl PeerHandler { chunk_storage_roots.first().unwrap_or(&H256::zero()), ); } + let peer_table = self.peer_table.clone(); tokio::spawn(PeerHandler::request_storage_ranges_worker( task, peer_id, + connection, + peer_table, pivot_header.state_root, - peer_channel, chunk_account_hashes, chunk_storage_roots, tx, @@ -1745,18 +1656,16 @@ impl PeerHandler { // Dropping the task sender so that the recv returns None drop(task_sender); - while let Some(result) = task_receiver.recv().await { - self.peer_table.free_peer(&result.peer_id).await?; - } - Ok(chunk_index + 1) } + #[allow(clippy::too_many_arguments)] async fn request_storage_ranges_worker( task: StorageTask, - free_peer_id: H256, + peer_id: H256, + mut connection: PeerConnection, + mut peer_table: PeerTable, state_root: H256, - mut free_downloader_channels_clone: PeerChannels, chunk_account_hashes: Vec, chunk_storage_roots: Vec, tx: tokio::sync::mpsc::Sender, @@ -1768,7 +1677,7 @@ impl PeerHandler { let empty_task_result = StorageTaskResult { start_index: task.start_index, account_storages: Vec::new(), - peer_id: free_peer_id, + peer_id, remaining_start: task.start_index, remaining_end: task.end_index, remaining_hash_range: (start_hash, task.end_hash), @@ -1782,32 +1691,19 @@ impl PeerHandler { limit_hash: task.end_hash.unwrap_or(HASH_MAX), response_bytes: MAX_RESPONSE_BYTES, }); - let mut receiver = free_downloader_channels_clone.receiver.lock().await; - if let Err(err) = (free_downloader_channels_clone.connection) - .cast(CastMessage::BackendMessage(request)) - .await - { - error!("Failed to send message to peer: {err:?}"); - tx.send(empty_task_result).await.ok(); - return Ok(()); - } - let request_result = tokio::time::timeout(Duration::from_secs(2), async move { - loop { - match receiver.recv().await { - Some(RLPxMessage::StorageRanges(StorageRanges { id, slots, proof })) - if id == request_id => - { - return Some((slots, proof)); - } - Some(_) => continue, - None => return None, - } - } - }) + let Ok(RLPxMessage::StorageRanges(StorageRanges { + id: _, + slots, + proof, + })) = PeerHandler::make_request( + &mut peer_table, + peer_id, + &mut connection, + request, + PEER_REPLY_TIMEOUT, + ) .await - .ok() - .flatten(); - let Some((slots, proof)) = request_result else { + else { tracing::debug!("Failed to get storage range"); tx.send(empty_task_result).await.ok(); return Ok(()); @@ -1911,7 +1807,7 @@ impl PeerHandler { let task_result = StorageTaskResult { start_index: start, account_storages, - peer_id: free_peer_id, + peer_id, remaining_start, remaining_end, remaining_hash_range: (remaining_start_hash, task.end_hash), @@ -1921,7 +1817,9 @@ impl PeerHandler { } pub async fn request_state_trienodes( - peer_channel: &mut PeerChannels, + peer_id: H256, + mut connection: PeerConnection, + mut peer_table: PeerTable, state_root: H256, paths: Vec, ) -> Result, RequestStateTrieNodesError> { @@ -1940,10 +1838,31 @@ impl PeerHandler { .collect(), bytes: MAX_RESPONSE_BYTES, }); - let nodes = - super::utils::send_message_and_wait_for_response(peer_channel, request, request_id) - .await - .map_err(RequestStateTrieNodesError::SendMessageError)?; + let nodes = match PeerHandler::make_request( + &mut peer_table, + peer_id, + &mut connection, + request, + PEER_REPLY_TIMEOUT, + ) + .await + { + Ok(RLPxMessage::TrieNodes(trie_nodes)) => trie_nodes + .nodes + .iter() + 
.map(|node| Node::decode_raw(node)) + .collect::, _>>() + .map_err(|e| { + RequestStateTrieNodesError::RequestError(PeerConnectionError::RLPDecodeError(e)) + }), + Ok(other_msg) => Err(RequestStateTrieNodesError::RequestError( + PeerConnectionError::UnexpectedResponse( + "TrieNodes".to_string(), + other_msg.to_string(), + ), + )), + Err(other_err) => Err(RequestStateTrieNodesError::RequestError(other_err)), + }?; if nodes.is_empty() || nodes.len() > expected_nodes { return Err(RequestStateTrieNodesError::InvalidData); @@ -1968,16 +1887,34 @@ impl PeerHandler { /// - There are no available peers (the node just started up or was rejected by all other nodes) /// - No peer returned a valid response in the given time and retry limits pub async fn request_storage_trienodes( - peer_channel: &mut PeerChannels, + peer_id: H256, + mut connection: PeerConnection, + mut peer_table: PeerTable, get_trie_nodes: GetTrieNodes, ) -> Result { // Keep track of peers we requested from so we can penalize unresponsive peers when we get a response // This is so we avoid penalizing peers due to requesting stale data let id = get_trie_nodes.id; let request = RLPxMessage::GetTrieNodes(get_trie_nodes); - super::utils::send_trie_nodes_messages_and_wait_for_reply(peer_channel, request, id) - .await - .map_err(|err| RequestStorageTrieNodes::SendMessageError(id, err)) + match PeerHandler::make_request( + &mut peer_table, + peer_id, + &mut connection, + request, + PEER_REPLY_TIMEOUT, + ) + .await + { + Ok(RLPxMessage::TrieNodes(trie_nodes)) => Ok(trie_nodes), + Ok(other_msg) => Err(RequestStorageTrieNodes::RequestError( + id, + PeerConnectionError::UnexpectedResponse( + "TrieNodes".to_string(), + other_msg.to_string(), + ), + )), + Err(e) => Err(RequestStorageTrieNodes::RequestError(id, e)), + } } /// Returns the PeerData for each connected Peer @@ -1994,8 +1931,9 @@ impl PeerHandler { } pub async fn get_block_header( - &self, - peer_channel: &mut PeerChannels, + &mut self, + peer_id: H256, + connection: &mut PeerConnection, block_number: u64, ) -> Result, PeerHandlerError> { let request_id = rand::random(); @@ -2007,32 +1945,20 @@ impl PeerHandler { reverse: false, }); info!("get_block_header: requesting header with number {block_number}"); - - let mut receiver = peer_channel.receiver.lock().await; - debug!("locked the receiver for the peer_channel"); - peer_channel - .connection - .cast(CastMessage::BackendMessage(request.clone())) - .await - .map_err(|e| PeerHandlerError::SendMessageToPeer(e.to_string()))?; - - let response = - tokio::time::timeout(Duration::from_secs(5), async move { receiver.recv().await }) - .await; - - // TODO: we need to check, this seems a scenario where the peer channel does teardown - // after we sent the backend message - let Some(Ok(response)) = response - .inspect_err(|_err| info!("Timeout while waiting for sync head from peer")) - .transpose() - else { - warn!("The RLPxConnection closed the backend channel"); - return Ok(None); - }; - - match response { - RLPxMessage::BlockHeaders(BlockHeaders { id, block_headers }) => { - if id == request_id && !block_headers.is_empty() { + match PeerHandler::make_request( + &mut self.peer_table, + peer_id, + connection, + request, + PEER_REPLY_TIMEOUT, + ) + .await + { + Ok(RLPxMessage::BlockHeaders(BlockHeaders { + id: _, + block_headers, + })) => { + if !block_headers.is_empty() { return Ok(Some( block_headers .last() @@ -2041,9 +1967,17 @@ impl PeerHandler { )); } } - _other_msgs => { + Ok(_other_msgs) => { info!("Received unexpected message from 
peer"); } + Err(PeerConnectionError::Timeout) => { + info!("Timeout while waiting for sync head from peer"); + } + // TODO: we need to check, this seems a scenario where the peer channel does teardown + // after we sent the backend message + Err(_) => { + warn!("The RLPxConnection closed the backend channel"); + } } Ok(None) @@ -2104,6 +2038,8 @@ pub enum PeerHandlerError { WriteStorageSnapshotsDir(u64), #[error("Received unexpected response from peer {0}")] UnexpectedResponseFromPeer(H256), + #[error("Received an empty response from peer {0}")] + EmptyResponseFromPeer(H256), #[error("Failed to receive message from peer {0}")] ReceiveMessageFromPeer(H256), #[error("Timeout while waiting for message from peer {0}")] @@ -2142,8 +2078,8 @@ pub struct RequestMetadata { #[derive(Debug, thiserror::Error)] pub enum RequestStateTrieNodesError { - #[error("Send message error")] - SendMessageError(SendMessageError), + #[error("Send request error")] + RequestError(PeerConnectionError), #[error("Invalid data")] InvalidData, #[error("Invalid Hash")] @@ -2152,6 +2088,6 @@ pub enum RequestStateTrieNodesError { #[derive(Debug, thiserror::Error)] pub enum RequestStorageTrieNodes { - #[error("Send message error")] - SendMessageError(u64, SendMessageError), + #[error("Send request error")] + RequestError(u64, PeerConnectionError), } diff --git a/crates/networking/p2p/rlpx/connection/codec.rs b/crates/networking/p2p/rlpx/connection/codec.rs index a4f497bd5ce..2fdcdf5ab87 100644 --- a/crates/networking/p2p/rlpx/connection/codec.rs +++ b/crates/networking/p2p/rlpx/connection/codec.rs @@ -1,7 +1,7 @@ use std::sync::{Arc, RwLock}; use crate::rlpx::{ - error::RLPxError, + error::PeerConnectionError, message::{self as rlpx, EthCapVersion}, utils::ecdh_xchng, }; @@ -39,14 +39,15 @@ impl RLPxCodec { remote_state: &RemoteState, hashed_nonces: [u8; 32], eth_version: Arc>, - ) -> Result { - let ephemeral_key_secret = ecdh_xchng( - &local_state.ephemeral_key, - &remote_state.ephemeral_key, - ) - .map_err(|error| { - RLPxError::CryptographyError(format!("Invalid generated ephemeral key secret: {error}")) - })?; + ) -> Result { + let ephemeral_key_secret = + ecdh_xchng(&local_state.ephemeral_key, &remote_state.ephemeral_key).map_err( + |error| { + PeerConnectionError::CryptographyError(format!( + "Invalid generated ephemeral key secret: {error}" + )) + }, + )?; // shared-secret = keccak256(ephemeral-key || keccak256(nonce || initiator-nonce)) let shared_secret = @@ -97,7 +98,7 @@ impl std::fmt::Debug for RLPxCodec { impl Decoder for RLPxCodec { type Item = rlpx::Message; - type Error = RLPxError; + type Error = PeerConnectionError; fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { let mac_aes_cipher = Aes256Enc::new_from_slice(&self.mac_key.0)?; @@ -113,7 +114,7 @@ impl Decoder for RLPxCodec { // Both are padded to the block's size (16 bytes) let (header_ciphertext, header_mac) = frame_header.split_at_mut_checked(16).ok_or_else(|| { - RLPxError::CryptographyError("Invalid frame header length".to_owned()) + PeerConnectionError::CryptographyError("Invalid frame header length".to_owned()) })?; // Validate MAC header @@ -124,14 +125,20 @@ impl Decoder for RLPxCodec { .clone() .finalize() .get(..16) - .ok_or_else(|| RLPxError::CryptographyError("Invalid mac digest".to_owned()))? + .ok_or_else(|| { + PeerConnectionError::CryptographyError("Invalid mac digest".to_owned()) + })? 
.try_into() - .map_err(|_| RLPxError::CryptographyError("Invalid mac digest".to_owned()))?; + .map_err(|_| { + PeerConnectionError::CryptographyError("Invalid mac digest".to_owned()) + })?; let mut seed = mac_digest.into(); mac_aes_cipher.encrypt_block(&mut seed); (H128(seed.into()) ^ H128(header_ciphertext.try_into().map_err(|_| { - RLPxError::CryptographyError("Invalid header ciphertext length".to_owned()) + PeerConnectionError::CryptographyError( + "Invalid header ciphertext length".to_owned(), + ) })?)) .0 }; @@ -147,13 +154,17 @@ impl Decoder for RLPxCodec { .clone() .finalize() .get(..16) - .ok_or_else(|| RLPxError::CryptographyError("Invalid header mac".to_owned()))? + .ok_or_else(|| { + PeerConnectionError::CryptographyError("Invalid header mac".to_owned()) + })? .try_into() - .map_err(|_| RLPxError::CryptographyError("Invalid header mac".to_owned()))?, + .map_err(|_| { + PeerConnectionError::CryptographyError("Invalid header mac".to_owned()) + })?, ); if header_mac != expected_header_mac.0 { - return Err(RLPxError::InvalidMessageFrame( + return Err(PeerConnectionError::InvalidMessageFrame( "Mismatched header mac".to_string(), )); } @@ -164,7 +175,7 @@ impl Decoder for RLPxCodec { temp_ingress_aes.try_apply_keystream(header_text)?; if header_text.len() < 3 { - return Err(RLPxError::CryptographyError( + return Err(PeerConnectionError::CryptographyError( "Invalid header text length".to_owned(), )); } @@ -176,7 +187,7 @@ impl Decoder for RLPxCodec { // Check that the size is not too large to avoid a denial of // service attack where the server runs out of memory. if padded_size > MAX_MESSAGE_SIZE { - return Err(RLPxError::InvalidMessageLength()); + return Err(PeerConnectionError::InvalidMessageLength()); } let total_message_size = (32 + padded_size + 16) as usize; @@ -197,7 +208,9 @@ impl Decoder for RLPxCodec { // this frame. let mut frame_data = src .get(32..total_message_size) - .ok_or_else(|| RLPxError::CryptographyError("Invalid frame data length".to_owned()))? + .ok_or_else(|| { + PeerConnectionError::CryptographyError("Invalid frame data length".to_owned()) + })? .to_vec(); src.advance(total_message_size); @@ -208,7 +221,7 @@ impl Decoder for RLPxCodec { let (frame_ciphertext, frame_mac) = frame_data .split_at_mut_checked(padded_size as usize) .ok_or_else(|| { - RLPxError::CryptographyError("Invalid frame data length".to_owned()) + PeerConnectionError::CryptographyError("Invalid frame data length".to_owned()) })?; // check MAC @@ -219,9 +232,13 @@ impl Decoder for RLPxCodec { .clone() .finalize() .get(..16) - .ok_or_else(|| RLPxError::CryptographyError("Invalid mac digest".to_owned()))? + .ok_or_else(|| { + PeerConnectionError::CryptographyError("Invalid mac digest".to_owned()) + })? .try_into() - .map_err(|_| RLPxError::CryptographyError("Invalid mac digest".to_owned()))?; + .map_err(|_| { + PeerConnectionError::CryptographyError("Invalid mac digest".to_owned()) + })?; let mut seed = mac_digest.into(); mac_aes_cipher.encrypt_block(&mut seed); (H128(seed.into()) ^ H128(mac_digest)).0 @@ -232,12 +249,12 @@ impl Decoder for RLPxCodec { .clone() .finalize() .get(..16) - .ok_or_else(|| RLPxError::CryptographyError("Invalid frame mac".to_owned()))? + .ok_or_else(|| PeerConnectionError::CryptographyError("Invalid frame mac".to_owned()))? 
.try_into() - .map_err(|_| RLPxError::CryptographyError("Invalid frame mac".to_owned()))?; + .map_err(|_| PeerConnectionError::CryptographyError("Invalid frame mac".to_owned()))?; if frame_mac != expected_frame_mac { - return Err(RLPxError::InvalidMessageFrame( + return Err(PeerConnectionError::InvalidMessageFrame( "Mismatched frame mac".to_string(), )); } @@ -247,7 +264,9 @@ impl Decoder for RLPxCodec { let (frame_data, _padding) = frame_ciphertext .split_at_checked(frame_size as usize) - .ok_or_else(|| RLPxError::CryptographyError("Invalid frame size".to_owned()))?; + .ok_or_else(|| { + PeerConnectionError::CryptographyError("Invalid frame size".to_owned()) + })?; let (msg_id, msg_data): (u8, _) = RLPDecode::decode_unfinished(frame_data)?; Ok(Some(rlpx::Message::decode( @@ -256,7 +275,7 @@ impl Decoder for RLPxCodec { *self .eth_version .read() - .map_err(|err| RLPxError::InternalError(err.to_string()))?, + .map_err(|err| PeerConnectionError::InternalError(err.to_string()))?, )?)) } @@ -282,7 +301,7 @@ impl Decoder for RLPxCodec { } impl Encoder for RLPxCodec { - type Error = RLPxError; + type Error = PeerConnectionError; fn encode(&mut self, message: rlpx::Message, buffer: &mut BytesMut) -> Result<(), Self::Error> { let mut frame_data = vec![]; @@ -291,7 +310,7 @@ impl Encoder for RLPxCodec { *self .eth_version .read() - .map_err(|err| RLPxError::InternalError(err.to_string()))?, + .map_err(|err| PeerConnectionError::InternalError(err.to_string()))?, )?; let mac_aes_cipher = Aes256Enc::new_from_slice(&self.mac_key.0)?; @@ -299,22 +318,19 @@ impl Encoder for RLPxCodec { // header = frame-size || header-data || header-padding let mut header = Vec::with_capacity(32); let frame_size = frame_data.len().to_be_bytes(); - header.extend_from_slice( - frame_size - .get(5..8) - .ok_or_else(|| RLPxError::CryptographyError("Invalid frame size".to_owned()))?, - ); + header.extend_from_slice(frame_size.get(5..8).ok_or_else(|| { + PeerConnectionError::CryptographyError("Invalid frame size".to_owned()) + })?); // header-data = [capability-id, context-id] (both always zero) let header_data = (0_u8, 0_u8); header_data.encode(&mut header); header.resize(16, 0); - self.egress_aes.try_apply_keystream( - header - .get_mut(..16) - .ok_or_else(|| RLPxError::CryptographyError("Invalid header length".to_owned()))?, - )?; + self.egress_aes + .try_apply_keystream(header.get_mut(..16).ok_or_else(|| { + PeerConnectionError::CryptographyError("Invalid header length".to_owned()) + })?)?; let header_mac_seed = { let mac_digest: [u8; 16] = self @@ -322,23 +338,31 @@ impl Encoder for RLPxCodec { .clone() .finalize() .get(..16) - .ok_or_else(|| RLPxError::CryptographyError("Invalid mac digest".to_owned()))? + .ok_or_else(|| { + PeerConnectionError::CryptographyError("Invalid mac digest".to_owned()) + })? .try_into() - .map_err(|_| RLPxError::CryptographyError("Invalid mac digest".to_owned()))?; + .map_err(|_| { + PeerConnectionError::CryptographyError("Invalid mac digest".to_owned()) + })?; let mut seed = mac_digest.into(); mac_aes_cipher.encrypt_block(&mut seed); let header_data = header .get(..16) - .ok_or_else(|| RLPxError::CryptographyError("Invalid header length".to_owned()))? + .ok_or_else(|| { + PeerConnectionError::CryptographyError("Invalid header length".to_owned()) + })? 
.try_into() - .map_err(|_| RLPxError::CryptographyError("Invalid header length".to_owned()))?; + .map_err(|_| { + PeerConnectionError::CryptographyError("Invalid header length".to_owned()) + })?; H128(seed.into()) ^ H128(header_data) }; self.egress_mac.update(header_mac_seed); let header_mac = self.egress_mac.clone().finalize(); - let header_mac_data = header_mac - .get(..16) - .ok_or_else(|| RLPxError::CryptographyError("Invalid header mac".to_owned()))?; + let header_mac_data = header_mac.get(..16).ok_or_else(|| { + PeerConnectionError::CryptographyError("Invalid header mac".to_owned()) + })?; header.extend_from_slice(header_mac_data); // Write header @@ -362,9 +386,13 @@ impl Encoder for RLPxCodec { .clone() .finalize() .get(..16) - .ok_or_else(|| RLPxError::CryptographyError("Invalid mac digest".to_owned()))? + .ok_or_else(|| { + PeerConnectionError::CryptographyError("Invalid mac digest".to_owned()) + })? .try_into() - .map_err(|_| RLPxError::CryptographyError("Invalid mac digest".to_owned()))?; + .map_err(|_| { + PeerConnectionError::CryptographyError("Invalid mac digest".to_owned()) + })?; let mut seed = mac_digest.into(); mac_aes_cipher.encrypt_block(&mut seed); (H128(seed.into()) ^ H128(mac_digest)).0 diff --git a/crates/networking/p2p/rlpx/connection/handshake.rs b/crates/networking/p2p/rlpx/connection/handshake.rs index 991712bb6eb..688b1f8dbda 100644 --- a/crates/networking/p2p/rlpx/connection/handshake.rs +++ b/crates/networking/p2p/rlpx/connection/handshake.rs @@ -10,8 +10,8 @@ use super::{ }; use crate::{ rlpx::{ - connection::server::{Established, InnerState}, - error::RLPxError, + connection::server::{ConnectionState, Established}, + error::PeerConnectionError, l2::l2_connection::L2ConnState, message::EthCapVersion, utils::{ @@ -61,11 +61,11 @@ pub(crate) struct LocalState { } pub(crate) async fn perform( - state: InnerState, + state: ConnectionState, eth_version: Arc>, -) -> Result<(Established, SplitStream>), RLPxError> { - let (context, node, framed, inbound) = match state { - InnerState::Initiator(Initiator { context, node, .. }) => { +) -> Result<(Established, SplitStream>), PeerConnectionError> { + let (context, node, framed) = match state { + ConnectionState::Initiator(Initiator { context, node, .. 
}) => { let addr = SocketAddr::new(node.ip, node.tcp_port); let mut stream = match tcp_stream(addr).await { Ok(result) => result, @@ -83,15 +83,17 @@ pub(crate) async fn perform( Keccak256::digest([remote_state.nonce.0, local_state.nonce.0].concat()).into(); let codec = RLPxCodec::new(&local_state, &remote_state, hashed_nonces, eth_version)?; log_peer_debug(&node, "Completed handshake as initiator"); - (context, node, Framed::new(stream, codec), false) + (context, node, Framed::new(stream, codec)) } - InnerState::Receiver(Receiver { + ConnectionState::Receiver(Receiver { context, peer_addr, stream, }) => { let Some(mut stream) = Arc::into_inner(stream) else { - return Err(RLPxError::StateError("Cannot use the stream".to_string())); + return Err(PeerConnectionError::StateError( + "Cannot use the stream".to_string(), + )); }; let remote_state = receive_auth(&context.signer, &mut stream).await?; let local_state = send_ack(remote_state.public_key, &mut stream).await?; @@ -107,15 +109,19 @@ pub(crate) async fn perform( remote_state.public_key, ); log_peer_debug(&node, "Completed handshake as receiver"); - (context, node, Framed::new(stream, codec), true) + (context, node, Framed::new(stream, codec)) } - InnerState::Established(_) => { - return Err(RLPxError::StateError("Already established".to_string())); + ConnectionState::Established(_) => { + return Err(PeerConnectionError::StateError( + "Already established".to_string(), + )); } // Shouldn't perform a Handshake on an already failed connection. // Put it here to complete the match arms - InnerState::HandshakeFailed => { - return Err(RLPxError::StateError("Handshake Failed".to_string())); + ConnectionState::HandshakeFailed => { + return Err(PeerConnectionError::StateError( + "Handshake Failed".to_string(), + )); } }; let (sink, stream) = framed.split(); @@ -134,12 +140,11 @@ pub(crate) async fn perform( client_version: context.client_version.clone(), connection_broadcast_send: context.broadcast.clone(), peer_table: context.table.clone(), - backend_channel: None, - _inbound: inbound, l2_state: context .based_context .map_or_else(|| L2ConnState::Unsupported, L2ConnState::Disconnected), tx_broadcaster: context.tx_broadcaster.clone(), + current_requests: HashMap::new(), }, stream, )) @@ -156,8 +161,9 @@ async fn send_auth( signer: &SecretKey, remote_public_key: H512, mut stream: S, -) -> Result { - let peer_pk = compress_pubkey(remote_public_key).ok_or_else(RLPxError::InvalidPeerId)?; +) -> Result { + let peer_pk = + compress_pubkey(remote_public_key).ok_or_else(PeerConnectionError::InvalidPeerId)?; let local_nonce = H256::random_using(&mut rand::thread_rng()); let local_ephemeral_key = SecretKey::new(&mut rand::thread_rng()); @@ -175,8 +181,9 @@ async fn send_auth( async fn send_ack( remote_public_key: H512, mut stream: S, -) -> Result { - let peer_pk = compress_pubkey(remote_public_key).ok_or_else(RLPxError::InvalidPeerId)?; +) -> Result { + let peer_pk = + compress_pubkey(remote_public_key).ok_or_else(PeerConnectionError::InvalidPeerId)?; let local_nonce = H256::random_using(&mut rand::thread_rng()); let local_ephemeral_key = SecretKey::new(&mut rand::thread_rng()); @@ -194,14 +201,14 @@ async fn send_ack( async fn receive_auth( signer: &SecretKey, stream: S, -) -> Result { +) -> Result { let msg_bytes = receive_handshake_msg(stream).await?; let size_data = &msg_bytes .get(..2) - .ok_or_else(RLPxError::InvalidMessageLength)?; + .ok_or_else(PeerConnectionError::InvalidMessageLength)?; let msg = &msg_bytes .get(2..) 
- .ok_or_else(RLPxError::InvalidMessageLength)?; + .ok_or_else(PeerConnectionError::InvalidMessageLength)?; let (auth, remote_ephemeral_key) = decode_auth_message(signer, msg, size_data)?; Ok(RemoteState { @@ -216,18 +223,18 @@ async fn receive_ack( signer: &SecretKey, remote_public_key: H512, stream: S, -) -> Result { +) -> Result { let msg_bytes = receive_handshake_msg(stream).await?; let size_data = &msg_bytes .get(..2) - .ok_or_else(RLPxError::InvalidMessageLength)?; + .ok_or_else(PeerConnectionError::InvalidMessageLength)?; let msg = &msg_bytes .get(2..) - .ok_or_else(RLPxError::InvalidMessageLength)?; + .ok_or_else(PeerConnectionError::InvalidMessageLength)?; let ack = decode_ack_message(signer, msg, size_data)?; let remote_ephemeral_key = ack .get_ephemeral_pubkey() - .ok_or_else(|| RLPxError::NotFound("Remote ephemeral key".to_string()))?; + .ok_or_else(|| PeerConnectionError::NotFound("Remote ephemeral key".to_string()))?; Ok(RemoteState { public_key: remote_public_key, @@ -239,7 +246,7 @@ async fn receive_ack( async fn receive_handshake_msg( mut stream: S, -) -> Result, RLPxError> { +) -> Result, PeerConnectionError> { let mut buf = vec![0; 2]; // Read the message's size @@ -247,14 +254,16 @@ async fn receive_handshake_msg( let ack_data = [buf[0], buf[1]]; let msg_size = u16::from_be_bytes(ack_data) as usize; if msg_size > P2P_MAX_MESSAGE_SIZE { - return Err(RLPxError::InvalidMessageLength()); + return Err(PeerConnectionError::InvalidMessageLength()); } buf.resize(msg_size + 2, 0); // Read the rest of the message // Guard unwrap if buf.len() < msg_size + 2 { - return Err(RLPxError::CryptographyError(String::from("bad buf size"))); + return Err(PeerConnectionError::CryptographyError(String::from( + "bad buf size", + ))); } stream.read_exact(&mut buf[2..msg_size + 2]).await?; let ack_bytes = &buf[..msg_size + 2]; @@ -267,12 +276,14 @@ fn encode_auth_message( local_nonce: H256, remote_static_pubkey: &PublicKey, local_ephemeral_key: &SecretKey, -) -> Result, RLPxError> { +) -> Result, PeerConnectionError> { let public_key = decompress_pubkey(&static_key.public_key(secp256k1::SECP256K1)); // Derive a shared secret from the static keys. let static_shared_secret = ecdh_xchng(static_key, remote_static_pubkey).map_err(|error| { - RLPxError::CryptographyError(format!("Invalid generated static shared secret: {error}")) + PeerConnectionError::CryptographyError(format!( + "Invalid generated static shared secret: {error}" + )) })?; // Create the signature included in the message. @@ -296,16 +307,19 @@ fn decode_auth_message( static_key: &SecretKey, msg: &[u8], auth_data: &[u8], -) -> Result<(AuthMessage, PublicKey), RLPxError> { +) -> Result<(AuthMessage, PublicKey), PeerConnectionError> { let payload = decrypt_message(static_key, msg, auth_data)?; // RLP-decode the message. let (auth, _padding) = AuthMessage::decode_unfinished(&payload)?; // Derive a shared secret from the static keys. 
- let peer_pk = compress_pubkey(auth.public_key).ok_or_else(RLPxError::InvalidPeerId)?; + let peer_pk = + compress_pubkey(auth.public_key).ok_or_else(PeerConnectionError::InvalidPeerId)?; let static_shared_secret = ecdh_xchng(static_key, &peer_pk).map_err(|error| { - RLPxError::CryptographyError(format!("Invalid generated static shared secret: {error}")) + PeerConnectionError::CryptographyError(format!( + "Invalid generated static shared secret: {error}" + )) })?; let remote_ephemeral_key = retrieve_remote_ephemeral_key(static_shared_secret.into(), auth.nonce, auth.signature)?; @@ -317,7 +331,7 @@ fn encode_ack_message( local_ephemeral_key: &SecretKey, local_nonce: H256, remote_static_pubkey: &PublicKey, -) -> Result, RLPxError> { +) -> Result, PeerConnectionError> { // Compose the ack message. let ack_msg = AckMessage::new( decompress_pubkey(&local_ephemeral_key.public_key(secp256k1::SECP256K1)), @@ -335,7 +349,7 @@ fn decode_ack_message( static_key: &SecretKey, msg: &[u8], auth_data: &[u8], -) -> Result { +) -> Result { let payload = decrypt_message(static_key, msg, auth_data)?; // RLP-decode the message. @@ -348,36 +362,40 @@ fn decrypt_message( static_key: &SecretKey, msg: &[u8], size_data: &[u8], -) -> Result, RLPxError> { +) -> Result, PeerConnectionError> { // Split the message into its components. General layout is: // public-key (65) || iv (16) || ciphertext || mac (32) let (pk, rest) = msg .split_at_checked(65) - .ok_or_else(RLPxError::InvalidMessageLength)?; + .ok_or_else(PeerConnectionError::InvalidMessageLength)?; let (iv, rest) = rest .split_at_checked(16) - .ok_or_else(RLPxError::InvalidMessageLength)?; + .ok_or_else(PeerConnectionError::InvalidMessageLength)?; let (c, d) = rest .split_at_checked(rest.len() - 32) - .ok_or_else(RLPxError::InvalidMessageLength)?; + .ok_or_else(PeerConnectionError::InvalidMessageLength)?; // Derive the message shared secret. let shared_secret = ecdh_xchng(static_key, &PublicKey::from_slice(pk)?).map_err(|error| { - RLPxError::CryptographyError(format!("Invalid generated shared secret: {error}")) + PeerConnectionError::CryptographyError(format!("Invalid generated shared secret: {error}")) })?; // Derive the AES and MAC keys from the message shared secret. let mut buf = [0; 32]; kdf(&shared_secret, &mut buf).map_err(|error| { - RLPxError::CryptographyError(format!("Couldn't get keys from shared secret: {error}")) + PeerConnectionError::CryptographyError(format!( + "Couldn't get keys from shared secret: {error}" + )) })?; let aes_key = &buf[..16]; let mac_key = sha256(&buf[16..]); // Verify the MAC. let expected_d = sha256_hmac(&mac_key, &[iv, c], size_data) - .map_err(|error| RLPxError::CryptographyError(error.to_string()))?; + .map_err(|error| PeerConnectionError::CryptographyError(error.to_string()))?; if d != expected_d { - return Err(RLPxError::HandshakeError(String::from("Invalid MAC"))); + return Err(PeerConnectionError::HandshakeError(String::from( + "Invalid MAC", + ))); } // Decrypt the message with the AES key. 
@@ -390,7 +408,7 @@ fn decrypt_message( fn encrypt_message( remote_static_pubkey: &PublicKey, mut encoded_msg: Vec, -) -> Result, RLPxError> { +) -> Result, PeerConnectionError> { const SIGNATURE_SIZE: u16 = 65; const IV_SIZE: u16 = 16; const MAC_FOOTER_SIZE: u16 = 32; @@ -407,7 +425,7 @@ fn encrypt_message( let encoded_msg_len: u16 = encoded_msg .len() .try_into() - .map_err(|_| RLPxError::CryptographyError("Invalid message length".to_owned()))?; + .map_err(|_| PeerConnectionError::CryptographyError("Invalid message length".to_owned()))?; let auth_size = ecies_overhead + encoded_msg_len; let auth_size_bytes = auth_size.to_be_bytes(); @@ -417,13 +435,15 @@ fn encrypt_message( // Derive a shared secret for this message. let message_secret = ecdh_xchng(&message_secret_key, remote_static_pubkey).map_err(|error| { - RLPxError::CryptographyError(format!("Invalid generated message secret: {error}")) + PeerConnectionError::CryptographyError(format!( + "Invalid generated message secret: {error}" + )) })?; // Derive the AES and MAC keys from the message secret. let mut secret_keys = [0; 32]; kdf(&message_secret, &mut secret_keys) - .map_err(|error| RLPxError::CryptographyError(error.to_string()))?; + .map_err(|error| PeerConnectionError::CryptographyError(error.to_string()))?; let aes_key = &secret_keys[..16]; let mac_key = sha256(&secret_keys[16..]); @@ -438,7 +458,7 @@ fn encrypt_message( .public_key(secp256k1::SECP256K1) .serialize_uncompressed(); let mac_footer = sha256_hmac(&mac_key, &[&iv.0, &encrypted_auth_msg], &auth_size_bytes) - .map_err(|error| RLPxError::CryptographyError(error.to_string()))?; + .map_err(|error| PeerConnectionError::CryptographyError(error.to_string()))?; // Return the message let mut final_msg = Vec::new(); @@ -454,7 +474,7 @@ fn retrieve_remote_ephemeral_key( shared_secret: H256, remote_nonce: H256, signature: Signature, -) -> Result { +) -> Result { let signature_prehash = shared_secret ^ remote_nonce; let msg = secp256k1::Message::from_digest_slice(signature_prehash.as_bytes())?; let rid = RecoveryId::try_from(Into::::into(signature[64]))?; @@ -466,7 +486,7 @@ fn sign_shared_secret( shared_secret: H256, local_nonce: H256, local_ephemeral_key: &SecretKey, -) -> Result { +) -> Result { let signature_prehash = shared_secret ^ local_nonce; let msg = secp256k1::Message::from_digest_slice(signature_prehash.as_bytes())?; let sig = secp256k1::SECP256K1.sign_ecdsa_recoverable(&msg, local_ephemeral_key); @@ -475,7 +495,7 @@ fn sign_shared_secret( signature_bytes[..64].copy_from_slice(&signature); signature_bytes[64] = Into::::into(rid) .try_into() - .map_err(|_| RLPxError::CryptographyError("Invalid recovery id".into()))?; + .map_err(|_| PeerConnectionError::CryptographyError("Invalid recovery id".into()))?; Ok(signature_bytes.into()) } diff --git a/crates/networking/p2p/rlpx/connection/server.rs b/crates/networking/p2p/rlpx/connection/server.rs index 16665ca3c28..ae2af5bc955 100644 --- a/crates/networking/p2p/rlpx/connection/server.rs +++ b/crates/networking/p2p/rlpx/connection/server.rs @@ -1,43 +1,11 @@ -use std::{ - collections::HashMap, - net::SocketAddr, - sync::{Arc, RwLock}, - time::Duration, -}; - -use ethrex_blockchain::Blockchain; -use ethrex_common::types::{MempoolTransaction, Transaction}; -use ethrex_storage::{Store, error::StoreError}; -use ethrex_trie::TrieError; -use futures::{SinkExt as _, Stream, stream::SplitSink}; -use rand::random; -use secp256k1::{PublicKey, SecretKey}; -use spawned_concurrency::{ - messages::Unused, - tasks::{ - CastResponse, 
GenServer, GenServerHandle, - InitResult::{self, NoSuccess, Success}, - send_interval, spawn_listener, - }, -}; -use spawned_rt::tasks::{BroadcastStream, mpsc}; -use tokio::{ - net::TcpStream, - sync::{Mutex, broadcast}, - task::{self, Id}, -}; -use tokio_stream::StreamExt; -use tokio_util::codec::Framed; -use tracing::{debug, error}; - use crate::{ - discv4::peer_table::{PeerChannels, PeerTableHandle}, + discv4::peer_table::PeerTable, metrics::METRICS, network::P2PContext, rlpx::{ Message, connection::{codec::RLPxCodec, handshake}, - error::RLPxError, + error::PeerConnectionError, eth::{ backend, blocks::{BlockBodies, BlockHeaders}, @@ -67,21 +35,121 @@ use crate::{ tx_broadcaster::{InMessage, TxBroadcaster, send_tx_hashes}, types::Node, }; +use ethrex_blockchain::Blockchain; +use ethrex_common::types::{MempoolTransaction, Transaction}; +use ethrex_storage::{Store, error::StoreError}; +use ethrex_trie::TrieError; +use futures::{SinkExt as _, Stream, stream::SplitSink}; +use rand::random; +use secp256k1::{PublicKey, SecretKey}; +use spawned_concurrency::{ + messages::Unused, + tasks::{ + CastResponse, GenServer, GenServerHandle, + InitResult::{self, NoSuccess, Success}, + send_interval, spawn_listener, + }, +}; +use spawned_rt::tasks::BroadcastStream; +use std::{ + collections::HashMap, + net::SocketAddr, + sync::{Arc, RwLock}, + time::Duration, +}; +use tokio::{ + net::TcpStream, + sync::{Mutex, broadcast, oneshot}, + task::{self, Id}, +}; +use tokio_stream::StreamExt; +use tokio_util::codec::Framed; +use tracing::{debug, error}; const PING_INTERVAL: Duration = Duration::from_secs(10); const BLOCK_RANGE_UPDATE_INTERVAL: Duration = Duration::from_secs(60); -pub(crate) type RLPxConnBroadcastSender = broadcast::Sender<(tokio::task::Id, Arc)>; - -type RLPxConnectionHandle = GenServerHandle; +pub(crate) type PeerConnBroadcastSender = broadcast::Sender<(tokio::task::Id, Arc)>; #[derive(Clone, Debug)] +pub struct PeerConnection { + handle: GenServerHandle, +} + +impl PeerConnection { + pub async fn spawn_as_receiver( + context: P2PContext, + peer_addr: SocketAddr, + stream: TcpStream, + ) -> PeerConnection { + let state = ConnectionState::Receiver(Receiver { + context, + peer_addr, + stream: Arc::new(stream), + }); + let connection = PeerConnectionServer { state }; + Self { + handle: connection.start(), + } + } + + pub async fn spawn_as_initiator(context: P2PContext, node: &Node) -> PeerConnection { + let state = ConnectionState::Initiator(Initiator { + context, + node: node.clone(), + }); + let connection = PeerConnectionServer { state }; + Self { + handle: connection.start(), + } + } + + pub async fn outgoing_message(&mut self, message: Message) -> Result<(), PeerConnectionError> { + self.handle + .cast(CastMessage::OutgoingMessage(message)) + .await + .map_err(|err| PeerConnectionError::InternalError(err.to_string())) + } + + pub async fn outgoing_request( + &mut self, + message: Message, + timeout: Duration, + ) -> Result { + let id = message + .request_id() + .expect("Cannot wait on request without id"); + let (oneshot_tx, oneshot_rx) = oneshot::channel::(); + + self.handle + .cast(CastMessage::OutgoingRequest(message, Arc::new(oneshot_tx))) + .await + .map_err(|err| PeerConnectionError::InternalError(err.to_string()))?; + + // Wait for the response or timeout. 
This blocks the calling task (and not the ConnectionServer task) + match tokio::time::timeout(timeout, oneshot_rx).await { + Ok(Ok(response)) => Ok(response), + Ok(Err(error)) => Err(PeerConnectionError::RecvError(error.to_string())), + Err(_timeout) => { + // Notify timeout on request id + self.handle + .cast(CastMessage::RequestTimeout { id }) + .await + .map_err(|err| PeerConnectionError::InternalError(err.to_string()))?; + // Return timeout error + Err(PeerConnectionError::Timeout) + } + } + } +} + +#[derive(Debug)] pub struct Initiator { pub(crate) context: P2PContext, pub(crate) node: Node, } -#[derive(Clone, Debug)] +#[derive(Debug)] pub struct Receiver { pub(crate) context: P2PContext, pub(crate) peer_addr: SocketAddr, @@ -113,12 +181,11 @@ pub struct Established { //// under `handle_peer`. /// TODO: Improve this mechanism /// See https://github.com/lambdaclass/ethrex/issues/3388 - pub(crate) connection_broadcast_send: RLPxConnBroadcastSender, - pub(crate) peer_table: PeerTableHandle, - pub(crate) backend_channel: Option>, - pub(crate) _inbound: bool, + pub(crate) connection_broadcast_send: PeerConnBroadcastSender, + pub(crate) peer_table: PeerTable, pub(crate) l2_state: L2ConnState, pub(crate) tx_broadcaster: GenServerHandle, + pub(crate) current_requests: HashMap)>, } impl Established { @@ -134,7 +201,7 @@ impl Established { } #[derive(Debug)] -pub enum InnerState { +pub enum ConnectionState { HandshakeFailed, Initiator(Initiator), Receiver(Receiver), @@ -145,12 +212,20 @@ pub enum InnerState { #[allow(private_interfaces)] pub enum CastMessage { /// Received a message from the remote peer - PeerMessage(Message), - /// This node requests information from the remote peer - BackendMessage(Message), + IncomingMessage(Message), + /// We send information to the remote peer + OutgoingMessage(Message), + /// We request information from the remote peer + OutgoingRequest(Message, Arc>), + /// Received a notification of a request that timed out. + RequestTimeout { id: u64 }, + /// Periodic message to send ping to remote peer SendPing, + /// Periodic message to send block range update to remote peer BlockRangeUpdate, + /// Received a message to broadcast. Used only for L2; this logic should eventually move to tx_broadcaster. 
BroadcastMessage(task::Id, Arc), + /// L2 message L2(L2Cast), } @@ -164,40 +239,15 @@ pub enum OutMessage { } #[derive(Debug)] -pub struct RLPxConnection { - inner_state: InnerState, -} - -impl RLPxConnection { - pub async fn spawn_as_receiver( - context: P2PContext, - peer_addr: SocketAddr, - stream: TcpStream, - ) -> RLPxConnectionHandle { - let inner_state = InnerState::Receiver(Receiver { - context, - peer_addr, - stream: Arc::new(stream), - }); - let connection = RLPxConnection { inner_state }; - connection.start() - } - - pub async fn spawn_as_initiator(context: P2PContext, node: &Node) -> RLPxConnectionHandle { - let inner_state = InnerState::Initiator(Initiator { - context, - node: node.clone(), - }); - let connection = RLPxConnection { inner_state }; - connection.start() - } +pub struct PeerConnectionServer { + state: ConnectionState, } -impl GenServer for RLPxConnection { +impl GenServer for PeerConnectionServer { type CallMsg = Unused; type CastMsg = CastMessage; type OutMsg = Unused; - type Error = RLPxError; + type Error = PeerConnectionError; async fn init( mut self, @@ -206,7 +256,7 @@ impl GenServer for RLPxConnection { // Set a default eth version that we can update after we negotiate peer capabilities // This eth version will only be used to encode & decode the initial `Hello` messages. let eth_version = Arc::new(RwLock::new(EthCapVersion::default())); - match handshake::perform(self.inner_state, eth_version.clone()).await { + match handshake::perform(self.state, eth_version.clone()).await { Ok((mut established_state, stream)) => { log_peer_debug(&established_state.node, "Starting RLPx connection"); @@ -214,7 +264,8 @@ impl GenServer for RLPxConnection { initialize_connection(handle, &mut established_state, stream, eth_version).await { match &reason { - RLPxError::NoMatchingCapabilities() | RLPxError::HandshakeError(_) => { + PeerConnectionError::NoMatchingCapabilities() + | PeerConnectionError::HandshakeError(_) => { established_state .peer_table .set_unwanted(&established_state.node.node_id()) @@ -231,7 +282,7 @@ impl GenServer for RLPxConnection { METRICS.record_new_rlpx_conn_failure(reason).await; - self.inner_state = InnerState::Established(Box::new(established_state)); + self.state = ConnectionState::Established(Box::new(established_state)); Ok(NoSuccess(self)) } else { METRICS @@ -244,7 +295,7 @@ impl GenServer for RLPxConnection { ) .await; // New state - self.inner_state = InnerState::Established(Box::new(established_state)); + self.state = ConnectionState::Established(Box::new(established_state)); Ok(Success(self)) } } @@ -252,7 +303,7 @@ impl GenServer for RLPxConnection { // Handshake failed, just log a debug message. 
// No connection was established so no need to perform any other action debug!("Failed Handshake on RLPx connection {err}"); - self.inner_state = InnerState::HandshakeFailed; + self.state = ConnectionState::HandshakeFailed; Ok(NoSuccess(self)) } } @@ -261,24 +312,47 @@ impl GenServer for RLPxConnection { async fn handle_cast( &mut self, message: Self::CastMsg, - _handle: &RLPxConnectionHandle, + _handle: &GenServerHandle, ) -> CastResponse { - if let InnerState::Established(ref mut established_state) = self.inner_state { + if let ConnectionState::Established(ref mut established_state) = self.state { let peer_supports_l2 = established_state.l2_state.connection_state().is_ok(); let result = match message { - Self::CastMsg::PeerMessage(message) => { + Self::CastMsg::IncomingMessage(message) => { log_peer_debug( &established_state.node, - &format!("Received peer message: {message}"), + &format!("Received incoming message: {message}"), ); - handle_peer_message(established_state, message).await + handle_incoming_message(established_state, message).await } - Self::CastMsg::BackendMessage(message) => { + Self::CastMsg::OutgoingMessage(message) => { log_peer_debug( &established_state.node, - &format!("Received backend message: {message}"), + &format!("Received outgoing message: {message}"), ); - handle_backend_message(established_state, message).await + handle_outgoing_message(established_state, message).await + } + Self::CastMsg::OutgoingRequest(message, sender) => { + log_peer_debug( + &established_state.node, + &format!("Received outgoing request: {message}"), ); + handle_outgoing_request( + established_state, + message, + Arc::>::into_inner(sender) + .expect("Could not obtain sender channel"), + ) + .await + } + Self::CastMsg::RequestTimeout { id } => { + // Discard the request from current requests + if let Some((msg_type, _)) = established_state.current_requests.remove(&id) { + log_peer_debug( + &established_state.node, + &format!("{msg_type}({id}) timed out."), + ); + } + Ok(()) } Self::CastMsg::SendPing => { send(established_state, Message::Ping(PingMessage {})).await @@ -305,33 +379,37 @@ impl GenServer for RLPxConnection { } } } - _ => Err(RLPxError::MessageNotHandled( + _ => Err(PeerConnectionError::MessageNotHandled( "Unknown message or capability not handled".to_string(), )), }; if let Err(e) = result { match e { - RLPxError::Disconnected() - | RLPxError::DisconnectReceived(_) - | RLPxError::DisconnectSent(_) - | RLPxError::HandshakeError(_) - | RLPxError::NoMatchingCapabilities() - | RLPxError::InvalidPeerId() - | RLPxError::InvalidMessageLength() - | RLPxError::StateError(_) - | RLPxError::InvalidRecoveryId() => { + PeerConnectionError::Disconnected + | PeerConnectionError::DisconnectReceived(_) + | PeerConnectionError::DisconnectSent(_) + | PeerConnectionError::HandshakeError(_) + | PeerConnectionError::NoMatchingCapabilities() + | PeerConnectionError::InvalidPeerId() + | PeerConnectionError::InvalidMessageLength() + | PeerConnectionError::StateError(_) + | PeerConnectionError::InvalidRecoveryId() => { log_peer_debug(&established_state.node, &e.to_string()); return CastResponse::Stop; } - RLPxError::IoError(e) if e.kind() == std::io::ErrorKind::BrokenPipe => { + PeerConnectionError::IoError(e) + if e.kind() == std::io::ErrorKind::BrokenPipe => + { log_peer_error( &established_state.node, "Broken pipe with peer, disconnected", ); return CastResponse::Stop; } - RLPxError::StoreError(StoreError::Trie(TrieError::InconsistentTree)) => { + 
PeerConnectionError::StoreError(StoreError::Trie( + TrieError::InconsistentTree, + )) => { if established_state.blockchain.is_synced() { log_peer_error( &established_state.node, @@ -345,11 +423,16 @@ impl GenServer for RLPxConnection { } } _ => { + let client_id = established_state + .node + .version + .clone() + .unwrap_or("-".to_string()); log_peer_warn( &established_state.node, &format!( "Error handling cast message: {e}, for client: {} with capabilities {:?}", - established_state.client_version, established_state.capabilities + client_id, established_state.capabilities ), ); } @@ -363,8 +446,8 @@ impl GenServer for RLPxConnection { } async fn teardown(self, _handle: &GenServerHandle) -> Result<(), Self::Error> { - match self.inner_state { - InnerState::Established(mut established_state) => { + match self.state { + ConnectionState::Established(mut established_state) => { log_peer_debug( &established_state.node, "Closing connection with established peer", @@ -384,13 +467,13 @@ impl GenServer for RLPxConnection { } async fn initialize_connection( - handle: &RLPxConnectionHandle, + handle: &GenServerHandle, state: &mut Established, mut stream: S, eth_version: Arc>, -) -> Result<(), RLPxError> +) -> Result<(), PeerConnectionError> where - S: Unpin + Send + Stream> + 'static, + S: Unpin + Send + Stream> + 'static, { exchange_hello_messages(state, &mut stream).await?; @@ -402,22 +485,19 @@ where }; *eth_version .write() - .map_err(|err| RLPxError::InternalError(err.to_string()))? = version; - - // Handshake OK: handle connection - // Create channels to communicate directly to the peer - let (mut peer_channels, sender) = PeerChannels::create(handle.clone()); - - // Updating the state to establish the backend channel - state.backend_channel = Some(sender); + .map_err(|err| PeerConnectionError::InternalError(err.to_string()))? = version; init_capabilities(state, &mut stream).await?; + let mut connection = PeerConnection { + handle: handle.clone(), + }; + state .peer_table .new_connected_peer( state.node.clone(), - peer_channels.clone(), + connection.clone(), state.capabilities.clone(), ) .await?; @@ -425,7 +505,7 @@ where log_peer_debug(&state.node, "Peer connection initialized."); // Send transactions transaction hashes from mempool at connection start - send_all_pooled_tx_hashes(state, &mut peer_channels).await?; + send_all_pooled_tx_hashes(state, &mut connection).await?; // Periodic Pings repeated events. 
send_interval(PING_INTERVAL, handle.clone(), CastMessage::SendPing); @@ -454,7 +534,7 @@ where spawn_listener( handle.clone(), stream.filter_map(|result| match result { - Ok(msg) => Some(CastMessage::PeerMessage(msg)), + Ok(msg) => Some(CastMessage::IncomingMessage(msg)), Err(e) => { debug!(error=?e, "Error receiving RLPx message"); // Skipping invalid data @@ -479,8 +559,8 @@ where async fn send_all_pooled_tx_hashes( state: &mut Established, - peer_channels: &mut PeerChannels, -) -> Result<(), RLPxError> { + connection: &mut PeerConnection, +) -> Result<(), PeerConnectionError> { let txs: Vec = state .blockchain .mempool @@ -496,21 +576,21 @@ async fn send_all_pooled_tx_hashes( state.node.node_id(), )) .await - .map_err(|e| RLPxError::BroadcastError(e.to_string()))?; + .map_err(|e| PeerConnectionError::BroadcastError(e.to_string()))?; send_tx_hashes( txs, state.capabilities.clone(), - peer_channels, + connection, state.node.node_id(), &state.blockchain, ) .await - .map_err(|e| RLPxError::SendMessage(e.to_string()))?; + .map_err(|e| PeerConnectionError::SendMessage(e.to_string()))?; } Ok(()) } -async fn send_block_range_update(state: &mut Established) -> Result<(), RLPxError> { +async fn send_block_range_update(state: &mut Established) -> Result<(), PeerConnectionError> { // BlockRangeUpdate was introduced in eth/69 if state .negotiated_eth_capability @@ -526,7 +606,9 @@ async fn send_block_range_update(state: &mut Established) -> Result<(), RLPxErro Ok(()) } -async fn should_send_block_range_update(state: &mut Established) -> Result { +async fn should_send_block_range_update( + state: &mut Established, +) -> Result { let latest_block = state.storage.get_latest_block_number().await?; if latest_block < state.last_block_range_update_block || latest_block - state.last_block_range_update_block >= 32 @@ -536,9 +618,12 @@ async fn should_send_block_range_update(state: &mut Established) -> Result(state: &mut Established, stream: &mut S) -> Result<(), RLPxError> +async fn init_capabilities( + state: &mut Established, + stream: &mut S, +) -> Result<(), PeerConnectionError> where - S: Unpin + Stream>, + S: Unpin + Stream>, { // Sending eth Status if peer supports it if let Some(eth) = state.negotiated_eth_capability.clone() { @@ -546,7 +631,7 @@ where 68 => Message::Status68(StatusMessage68::new(&state.storage).await?), 69 => Message::Status69(StatusMessage69::new(&state.storage).await?), ver => { - return Err(RLPxError::HandshakeError(format!( + return Err(PeerConnectionError::HandshakeError(format!( "Invalid eth version {ver}" ))); } @@ -558,7 +643,7 @@ where // https://github.com/ethereum/devp2p/blob/master/caps/eth.md#status-0x00 let msg = match receive(stream).await { Some(msg) => msg?, - None => return Err(RLPxError::Disconnected()), + None => return Err(PeerConnectionError::Disconnected), }; match msg { Message::Status68(msg_data) => { @@ -570,13 +655,13 @@ where backend::validate_status(msg_data, &state.storage, ð).await? 
} Message::Disconnect(disconnect) => { - return Err(RLPxError::HandshakeError(format!( + return Err(PeerConnectionError::HandshakeError(format!( "Peer disconnected due to: {}", disconnect.reason() ))); } _ => { - return Err(RLPxError::HandshakeError( + return Err(PeerConnectionError::HandshakeError( "Expected a Status message".to_string(), )); } @@ -596,20 +681,20 @@ async fn send_disconnect_message(state: &mut Established, reason: Option { + PeerConnectionError::DisconnectReceived(DisconnectReason::AlreadyConnected) + | PeerConnectionError::DisconnectSent(DisconnectReason::AlreadyConnected) => { log_peer_debug(&state.node, &format!("{error_text}: ({error})")); log_peer_debug(&state.node, "Peer already connected, don't replace it"); } @@ -623,11 +708,11 @@ async fn connection_failed(state: &mut Established, error_text: &str, error: &RL } } -fn match_disconnect_reason(error: &RLPxError) -> Option { +fn match_disconnect_reason(error: &PeerConnectionError) -> Option { match error { - RLPxError::DisconnectSent(reason) => Some(*reason), - RLPxError::DisconnectReceived(reason) => Some(*reason), - RLPxError::RLPDecodeError(_) => Some(DisconnectReason::NetworkError), + PeerConnectionError::DisconnectSent(reason) => Some(*reason), + PeerConnectionError::DisconnectReceived(reason) => Some(*reason), + PeerConnectionError::RLPDecodeError(_) => Some(DisconnectReason::NetworkError), // TODO build a proper matching between error types and disconnection reasons _ => None, } @@ -636,9 +721,9 @@ fn match_disconnect_reason(error: &RLPxError) -> Option { async fn exchange_hello_messages( state: &mut Established, stream: &mut S, -) -> Result<(), RLPxError> +) -> Result<(), PeerConnectionError> where - S: Unpin + Stream>, + S: Unpin + Stream>, { let mut supported_capabilities: Vec = [ &SUPPORTED_ETH_CAPABILITIES[..], @@ -659,7 +744,7 @@ where // Receive Hello message let msg = match receive(stream).await { Some(msg) => msg?, - None => return Err(RLPxError::Disconnected()), + None => return Err(PeerConnectionError::Disconnected), }; match msg { @@ -702,7 +787,7 @@ where state.capabilities = hello_message.capabilities; if negotiated_eth_version == 0 { - return Err(RLPxError::NoMatchingCapabilities()); + return Err(PeerConnectionError::NoMatchingCapabilities()); } debug!("Negotatied eth version: eth/{}", negotiated_eth_version); state.negotiated_eth_capability = Some(Capability::eth(negotiated_eth_version)); @@ -716,15 +801,22 @@ where Ok(()) } - Message::Disconnect(disconnect) => Err(RLPxError::DisconnectReceived(disconnect.reason())), + Message::Disconnect(disconnect) => { + Err(PeerConnectionError::DisconnectReceived(disconnect.reason())) + } _ => { // Fail if it is not a hello message - Err(RLPxError::BadRequest("Expected Hello message".to_string())) + Err(PeerConnectionError::BadRequest( + "Expected Hello message".to_string(), + )) } } } -pub(crate) async fn send(state: &mut Established, message: Message) -> Result<(), RLPxError> { +pub(crate) async fn send( + state: &mut Established, + message: Message, +) -> Result<(), PeerConnectionError> { state.sink.send(message).await } @@ -739,14 +831,17 @@ pub(crate) async fn send(state: &mut Established, message: Message) -> Result<() /// while sending pings and you should not assume a disconnection. /// /// See [`Framed::new`] for more details. 
-async fn receive(stream: &mut S) -> Option> +async fn receive(stream: &mut S) -> Option> where - S: Unpin + Stream>, + S: Unpin + Stream>, { stream.next().await } -async fn handle_peer_message(state: &mut Established, message: Message) -> Result<(), RLPxError> { +async fn handle_incoming_message( + state: &mut Established, + message: Message, +) -> Result<(), PeerConnectionError> { let peer_supports_eth = state.negotiated_eth_capability.is_some(); let peer_supports_l2 = state.l2_state.connection_state().is_ok(); match message { @@ -766,7 +861,7 @@ async fn handle_peer_message(state: &mut Established, message: Message) -> Resul // TODO handle the disconnection request - return Err(RLPxError::DisconnectReceived(reason)); + return Err(PeerConnectionError::DisconnectReceived(reason)); } Message::Ping(_) => { log_peer_debug(&state.node, "Sending pong message"); @@ -815,7 +910,7 @@ async fn handle_peer_message(state: &mut Established, message: Message) -> Resul state.node.node_id(), )) .await - .map_err(|e| RLPxError::BroadcastError(e.to_string()))?; + .map_err(|e| PeerConnectionError::BroadcastError(e.to_string()))?; } } Message::GetBlockHeaders(msg_data) if peer_supports_eth => { @@ -842,7 +937,7 @@ async fn handle_peer_message(state: &mut Established, message: Message) -> Resul 68 => Message::Receipts68(Receipts68::new(id, receipts)), 69 => Message::Receipts69(Receipts69::new(id, receipts)), ver => { - return Err(RLPxError::InternalError(format!( + return Err(PeerConnectionError::InternalError(format!( "Invalid eth version {ver}" ))); } @@ -865,7 +960,7 @@ async fn handle_peer_message(state: &mut Established, message: Message) -> Resul &format!("disconnected from peer. Reason: {err}"), ); send_disconnect_message(state, Some(DisconnectReason::SubprotocolError)).await; - return Err(RLPxError::DisconnectSent( + return Err(PeerConnectionError::DisconnectSent( DisconnectReason::SubprotocolError, )); } @@ -893,7 +988,7 @@ async fn handle_peer_message(state: &mut Established, message: Message) -> Resul ); send_disconnect_message(state, Some(DisconnectReason::SubprotocolError)) .await; - return Err(RLPxError::DisconnectSent( + return Err(PeerConnectionError::DisconnectSent( DisconnectReason::SubprotocolError, )); } else { @@ -915,7 +1010,7 @@ async fn handle_peer_message(state: &mut Established, message: Message) -> Resul tokio::task::spawn_blocking(move || process_byte_codes_request(req, storage_clone)) .await .map_err(|_| { - RLPxError::InternalError( + PeerConnectionError::InternalError( "Failed to execute bytecode retrieval task".to_string(), ) })??; @@ -937,32 +1032,51 @@ async fn handle_peer_message(state: &mut Established, message: Message) -> Resul | message @ Message::BlockHeaders(_) | message @ Message::Receipts68(_) | message @ Message::Receipts69(_) => { - state - .backend_channel - .as_mut() - // TODO: this unwrap() is temporary, until we fix the backend process to use spawned - .expect("Backend channel is not available") - .send(message)? + if let Some((_, tx)) = message + .request_id() + .and_then(|id| state.current_requests.remove(&id)) + { + tx.send(message) + .map_err(|e| PeerConnectionError::SendMessage(e.to_string()))? 
+ } else { + return Err(PeerConnectionError::ExpectedRequestId(format!("{message}"))); + } } // TODO: Add new message types and handlers as they are implemented - message => return Err(RLPxError::MessageNotHandled(format!("{message}"))), + message => return Err(PeerConnectionError::MessageNotHandled(format!("{message}"))), }; Ok(()) } -async fn handle_backend_message( +async fn handle_outgoing_message( state: &mut Established, message: Message, -) -> Result<(), RLPxError> { +) -> Result<(), PeerConnectionError> { log_peer_debug(&state.node, &format!("Sending message {message}")); send(state, message).await?; Ok(()) } +async fn handle_outgoing_request( + state: &mut Established, + message: Message, + sender: oneshot::Sender, +) -> Result<(), PeerConnectionError> { + // Insert the request in the request map if it supports a request id. + message.request_id().and_then(|id| { + state + .current_requests + .insert(id, (format!("{message}"), sender)) + }); + log_peer_debug(&state.node, &format!("Sending request {message}")); + send(state, message).await?; + Ok(()) +} + async fn handle_broadcast( state: &mut Established, (id, broadcasted_msg): (task::Id, Arc), -) -> Result<(), RLPxError> { +) -> Result<(), PeerConnectionError> { if id != tokio::task::id() { match broadcasted_msg.as_ref() { l2_msg @ Message::L2(_) => { @@ -971,14 +1085,14 @@ async fn handle_broadcast( msg => { let error_message = format!("Non-supported message broadcasted: {msg}"); log_peer_error(&state.node, &error_message); - return Err(RLPxError::BroadcastError(error_message)); + return Err(PeerConnectionError::BroadcastError(error_message)); } } } Ok(()) } -async fn handle_block_range_update(state: &mut Established) -> Result<(), RLPxError> { +async fn handle_block_range_update(state: &mut Established) -> Result<(), PeerConnectionError> { if should_send_block_range_update(state).await? 
{ send_block_range_update(state).await } else { @@ -986,13 +1100,16 @@ async fn handle_block_range_update(state: &mut Established) -> Result<(), RLPxEr } } -pub(crate) fn broadcast_message(state: &Established, msg: Message) -> Result<(), RLPxError> { +pub(crate) fn broadcast_message( + state: &Established, + msg: Message, +) -> Result<(), PeerConnectionError> { match msg { l2_msg @ Message::L2(_) => broadcast_l2_message(state, l2_msg), msg => { let error_message = format!("Broadcasting for msg: {msg} is not supported"); log_peer_error(&state.node, &error_message); - Err(RLPxError::BroadcastError(error_message)) + Err(PeerConnectionError::BroadcastError(error_message)) } } } diff --git a/crates/networking/p2p/rlpx/error.rs b/crates/networking/p2p/rlpx/error.rs index 81cb7eee190..71e8e0337ab 100644 --- a/crates/networking/p2p/rlpx/error.rs +++ b/crates/networking/p2p/rlpx/error.rs @@ -1,13 +1,10 @@ +use super::{message::Message, p2p::DisconnectReason}; +use crate::discv4::peer_table::PeerTableError; use ethrex_blockchain::error::{ChainError, MempoolError}; use ethrex_rlp::error::{RLPDecodeError, RLPEncodeError}; use ethrex_storage::error::StoreError; use ethrex_storage_rollup::RollupStoreError; use thiserror::Error; -use tokio::sync::broadcast::error::RecvError; - -use crate::discv4::peer_table::PeerTableError; - -use super::{message::Message, p2p::DisconnectReason}; #[derive(Debug, Error)] pub enum CryptographyError { @@ -21,7 +18,7 @@ pub enum CryptographyError { // TODO improve errors #[derive(Debug, Error)] -pub enum RLPxError { +pub enum PeerConnectionError { #[error("{0}")] HandshakeError(String), #[error("Invalid connection state: {0}")] @@ -29,7 +26,7 @@ pub enum RLPxError { #[error("No matching capabilities")] NoMatchingCapabilities(), #[error("Peer disconnected")] - Disconnected(), + Disconnected, #[error("Disconnect requested: {0}")] DisconnectReceived(DisconnectReason), #[error("Disconnect sent: {0}")] @@ -42,6 +39,8 @@ pub enum RLPxError { InvalidRecoveryId(), #[error("Invalid message length")] InvalidMessageLength(), + #[error("Request id not present: {0}")] + ExpectedRequestId(String), #[error("Cannot handle message: {0}")] MessageNotHandled(String), #[error("Bad Request: {0}")] @@ -58,8 +57,8 @@ pub enum RLPxError { CryptographyError(String), #[error("Failed to broadcast msg: {0}")] BroadcastError(String), - #[error(transparent)] - RecvError(#[from] RecvError), + #[error("RecvError: {0}")] + RecvError(String), #[error("Failed to send msg: {0}")] SendMessage(String), #[error("Error when inserting transaction in the mempool: {0}")] @@ -82,11 +81,15 @@ pub enum RLPxError { InvalidBlockRangeUpdate, #[error(transparent)] PeerTableError(#[from] PeerTableError), + #[error("Request timed out")] + Timeout, + #[error("Unexpected response: Expected {0}, got {1}")] + UnexpectedResponse(String, String), } // tokio::sync::mpsc::error::SendError is too large to be part of the RLPxError enum directly // so we will instead save the error's display message -impl From> for RLPxError { +impl From> for PeerConnectionError { fn from(value: tokio::sync::mpsc::error::SendError) -> Self { Self::SendMessage(value.to_string()) } @@ -94,20 +97,32 @@ impl From> for RLPxError { // Grouping all cryptographic related errors in a single CryptographicError variant // We can improve this to individual errors if required -impl From for RLPxError { +impl From for PeerConnectionError { fn from(e: secp256k1::Error) -> Self { - RLPxError::CryptographyError(e.to_string()) + 
PeerConnectionError::CryptographyError(e.to_string()) } } -impl From for RLPxError { +impl From for PeerConnectionError { fn from(e: sha3::digest::InvalidLength) -> Self { - RLPxError::CryptographyError(e.to_string()) + PeerConnectionError::CryptographyError(e.to_string()) } } -impl From for RLPxError { +impl From for PeerConnectionError { fn from(e: aes::cipher::StreamCipherError) -> Self { - RLPxError::CryptographyError(e.to_string()) + PeerConnectionError::CryptographyError(e.to_string()) + } +} + +impl From for PeerConnectionError { + fn from(e: tokio::sync::broadcast::error::RecvError) -> Self { + PeerConnectionError::RecvError(e.to_string()) + } +} + +impl From for PeerConnectionError { + fn from(e: tokio::sync::oneshot::error::RecvError) -> Self { + PeerConnectionError::RecvError(e.to_string()) } } diff --git a/crates/networking/p2p/rlpx/eth/backend.rs b/crates/networking/p2p/rlpx/eth/backend.rs index b3a6f4bb8e7..a073b3bef29 100644 --- a/crates/networking/p2p/rlpx/eth/backend.rs +++ b/crates/networking/p2p/rlpx/eth/backend.rs @@ -1,7 +1,7 @@ use ethrex_common::types::ForkId; use ethrex_storage::Store; -use crate::rlpx::{error::RLPxError, p2p::Capability}; +use crate::rlpx::{error::PeerConnectionError, p2p::Capability}; use super::status::StatusMessage; @@ -9,18 +9,21 @@ pub async fn validate_status( msg_data: ST, storage: &Store, eth_capability: &Capability, -) -> Result<(), RLPxError> { +) -> Result<(), PeerConnectionError> { let chain_config = storage.get_chain_config()?; // These blocks must always be available let genesis_header = storage .get_block_header(0)? - .ok_or(RLPxError::NotFound("Genesis Block".to_string()))?; + .ok_or(PeerConnectionError::NotFound("Genesis Block".to_string()))?; let genesis_hash = genesis_header.hash(); let latest_block_number = storage.get_latest_block_number().await?; - let latest_block_header = storage - .get_block_header(latest_block_number)? - .ok_or(RLPxError::NotFound(format!("Block {latest_block_number}")))?; + let latest_block_header = + storage + .get_block_header(latest_block_number)? 
+ .ok_or(PeerConnectionError::NotFound(format!( + "Block {latest_block_number}" + )))?; let fork_id = ForkId::new( chain_config, genesis_header.clone(), @@ -30,19 +33,19 @@ pub async fn validate_status( //Check networkID if msg_data.get_network_id() != chain_config.chain_id { - return Err(RLPxError::HandshakeError( + return Err(PeerConnectionError::HandshakeError( "Network Id does not match".to_string(), )); } //Check Protocol Version if msg_data.get_eth_version() != eth_capability.version { - return Err(RLPxError::HandshakeError( + return Err(PeerConnectionError::HandshakeError( "Eth protocol version does not match".to_string(), )); } //Check Genesis if msg_data.get_genesis() != genesis_hash { - return Err(RLPxError::HandshakeError( + return Err(PeerConnectionError::HandshakeError( "Genesis does not match".to_string(), )); } @@ -54,7 +57,9 @@ pub async fn validate_status( chain_config, genesis_header, ) { - return Err(RLPxError::HandshakeError("Invalid Fork Id".to_string())); + return Err(PeerConnectionError::HandshakeError( + "Invalid Fork Id".to_string(), + )); } Ok(()) diff --git a/crates/networking/p2p/rlpx/eth/eth68/status.rs b/crates/networking/p2p/rlpx/eth/eth68/status.rs index 792cd9758fc..5aee56302b8 100644 --- a/crates/networking/p2p/rlpx/eth/eth68/status.rs +++ b/crates/networking/p2p/rlpx/eth/eth68/status.rs @@ -1,5 +1,5 @@ use crate::rlpx::{ - error::RLPxError, + error::PeerConnectionError, eth::status::StatusMessage, message::RLPxMessage, utils::{snappy_compress, snappy_decompress}, @@ -75,7 +75,7 @@ impl RLPxMessage for StatusMessage68 { } impl StatusMessage68 { - pub async fn new(storage: &Store) -> Result { + pub async fn new(storage: &Store) -> Result { let chain_config = storage.get_chain_config()?; let total_difficulty = U256::from(chain_config.terminal_total_difficulty.unwrap_or_default()); @@ -84,11 +84,14 @@ impl StatusMessage68 { // These blocks must always be available let genesis_header = storage .get_block_header(0)? - .ok_or(RLPxError::NotFound("Genesis Block".to_string()))?; + .ok_or(PeerConnectionError::NotFound("Genesis Block".to_string()))?; let lastest_block = storage.get_latest_block_number().await?; - let block_header = storage - .get_block_header(lastest_block)? - .ok_or(RLPxError::NotFound(format!("Block {lastest_block}")))?; + let block_header = + storage + .get_block_header(lastest_block)? + .ok_or(PeerConnectionError::NotFound(format!( + "Block {lastest_block}" + )))?; let genesis = genesis_header.hash(); let lastest_block_hash = block_header.hash(); diff --git a/crates/networking/p2p/rlpx/eth/eth69/status.rs b/crates/networking/p2p/rlpx/eth/eth69/status.rs index f33f9856894..c294d78593e 100644 --- a/crates/networking/p2p/rlpx/eth/eth69/status.rs +++ b/crates/networking/p2p/rlpx/eth/eth69/status.rs @@ -1,5 +1,5 @@ use crate::rlpx::{ - error::RLPxError, + error::PeerConnectionError, eth::status::StatusMessage, message::RLPxMessage, utils::{snappy_compress, snappy_decompress}, @@ -76,18 +76,21 @@ impl RLPxMessage for StatusMessage69 { } impl StatusMessage69 { - pub async fn new(storage: &Store) -> Result { + pub async fn new(storage: &Store) -> Result { let chain_config = storage.get_chain_config()?; let network_id = chain_config.chain_id; // These blocks must always be available let genesis_header = storage .get_block_header(0)? 
- .ok_or(RLPxError::NotFound("Genesis Block".to_string()))?; + .ok_or(PeerConnectionError::NotFound("Genesis Block".to_string()))?; let lastest_block = storage.get_latest_block_number().await?; - let block_header = storage - .get_block_header(lastest_block)? - .ok_or(RLPxError::NotFound(format!("Block {lastest_block}")))?; + let block_header = + storage + .get_block_header(lastest_block)? + .ok_or(PeerConnectionError::NotFound(format!( + "Block {lastest_block}" + )))?; let genesis = genesis_header.hash(); let lastest_block_hash = block_header.hash(); diff --git a/crates/networking/p2p/rlpx/eth/update.rs b/crates/networking/p2p/rlpx/eth/update.rs index 714f6606134..7e9ea31c87b 100644 --- a/crates/networking/p2p/rlpx/eth/update.rs +++ b/crates/networking/p2p/rlpx/eth/update.rs @@ -1,4 +1,4 @@ -use crate::rlpx::error::RLPxError; +use crate::rlpx::error::PeerConnectionError; use crate::rlpx::{ message::RLPxMessage, utils::{snappy_compress, snappy_decompress}, @@ -19,11 +19,14 @@ pub struct BlockRangeUpdate { } impl BlockRangeUpdate { - pub async fn new(storage: &Store) -> Result { + pub async fn new(storage: &Store) -> Result { let latest_block = storage.get_latest_block_number().await?; - let block_header = storage - .get_block_header(latest_block)? - .ok_or(RLPxError::NotFound(format!("Block {latest_block}")))?; + let block_header = + storage + .get_block_header(latest_block)? + .ok_or(PeerConnectionError::NotFound(format!( + "Block {latest_block}" + )))?; let latest_block_hash = block_header.hash(); Ok(Self { @@ -34,9 +37,9 @@ impl BlockRangeUpdate { } /// Validates an incoming BlockRangeUpdate from a peer - pub fn validate(&self) -> Result<(), RLPxError> { + pub fn validate(&self) -> Result<(), PeerConnectionError> { if self.earliest_block > self.latest_block || self.latest_block_hash.is_zero() { - return Err(RLPxError::InvalidBlockRangeUpdate); + return Err(PeerConnectionError::InvalidBlockRangeUpdate); } Ok(()) } diff --git a/crates/networking/p2p/rlpx/initiator.rs b/crates/networking/p2p/rlpx/initiator.rs index 7d1d806d376..728718abf64 100644 --- a/crates/networking/p2p/rlpx/initiator.rs +++ b/crates/networking/p2p/rlpx/initiator.rs @@ -1,16 +1,14 @@ -use std::time::Duration; - +use crate::{ + discv4::peer_table::PeerTableError, metrics::METRICS, network::P2PContext, + rlpx::connection::server::PeerConnection, +}; use spawned_concurrency::{ messages::Unused, tasks::{CastResponse, GenServer, send_after}, }; - +use std::time::Duration; use tracing::{debug, error, info}; -use crate::{discv4::peer_table::PeerTableError, metrics::METRICS, network::P2PContext}; - -use crate::rlpx::connection::server::RLPxConnection; - #[derive(Debug, thiserror::Error)] pub enum RLPxInitiatorError { #[error(transparent)] @@ -63,7 +61,7 @@ impl RLPxInitiator { .await?; for contact in contacts { - RLPxConnection::spawn_as_initiator(self.context.clone(), &contact.node).await; + PeerConnection::spawn_as_initiator(self.context.clone(), &contact.node).await; METRICS.record_new_rlpx_conn_attempt().await; } Ok(()) diff --git a/crates/networking/p2p/rlpx/l2/l2_connection.rs b/crates/networking/p2p/rlpx/l2/l2_connection.rs index ffa524ef94b..c8d975869dd 100644 --- a/crates/networking/p2p/rlpx/l2/l2_connection.rs +++ b/crates/networking/p2p/rlpx/l2/l2_connection.rs @@ -1,7 +1,7 @@ use crate::rlpx::connection::server::{broadcast_message, send}; use crate::rlpx::l2::messages::{BatchSealed, L2Message, NewBlock}; use crate::rlpx::utils::log_peer_error; -use crate::rlpx::{connection::server::Established, error::RLPxError, 
message::Message}; +use crate::rlpx::{connection::server::Established, error::PeerConnectionError, message::Message}; use ethereum_types::Address; use ethereum_types::Signature; use ethrex_blockchain::error::ChainError; @@ -56,24 +56,26 @@ impl L2ConnState { } } - pub(crate) fn connection_state_mut(&mut self) -> Result<&mut L2ConnectedState, RLPxError> { + pub(crate) fn connection_state_mut( + &mut self, + ) -> Result<&mut L2ConnectedState, PeerConnectionError> { match self { - Self::Unsupported => Err(RLPxError::IncompatibleProtocol), - Self::Disconnected(_) => Err(RLPxError::L2CapabilityNotNegotiated), + Self::Unsupported => Err(PeerConnectionError::IncompatibleProtocol), + Self::Disconnected(_) => Err(PeerConnectionError::L2CapabilityNotNegotiated), Self::Connected(conn_state) => Ok(conn_state), } } - pub(crate) fn connection_state(&self) -> Result<&L2ConnectedState, RLPxError> { + pub(crate) fn connection_state(&self) -> Result<&L2ConnectedState, PeerConnectionError> { match self { - Self::Unsupported => Err(RLPxError::IncompatibleProtocol), - Self::Disconnected(_) => Err(RLPxError::L2CapabilityNotNegotiated), + Self::Unsupported => Err(PeerConnectionError::IncompatibleProtocol), + Self::Disconnected(_) => Err(PeerConnectionError::L2CapabilityNotNegotiated), Self::Connected(conn_state) => Ok(conn_state), } } - pub(crate) fn set_established(&mut self) -> Result<(), RLPxError> { + pub(crate) fn set_established(&mut self) -> Result<(), PeerConnectionError> { match self { - Self::Unsupported => Err(RLPxError::IncompatibleProtocol), + Self::Unsupported => Err(PeerConnectionError::IncompatibleProtocol), Self::Disconnected(ctxt) => { let state = L2ConnectedState { latest_block_sent: 0, @@ -101,7 +103,7 @@ fn validate_signature(_recovered_lead_sequencer: Address) -> bool { pub(crate) async fn handle_based_capability_message( established: &mut Established, msg: L2Message, -) -> Result<(), RLPxError> { +) -> Result<(), PeerConnectionError> { established.l2_state.connection_state()?; match msg { L2Message::BatchSealed(ref batch_sealed_msg) => { @@ -123,18 +125,21 @@ pub(crate) async fn handle_based_capability_message( pub(crate) async fn handle_l2_broadcast( state: &mut Established, l2_msg: &Message, -) -> Result<(), RLPxError> { +) -> Result<(), PeerConnectionError> { match l2_msg { msg @ Message::L2(L2Message::BatchSealed(_)) => send(state, msg.clone()).await, msg @ Message::L2(L2Message::NewBlock(_)) => send(state, msg.clone()).await, - _ => Err(RLPxError::BroadcastError(format!( + _ => Err(PeerConnectionError::BroadcastError(format!( "Message {:?} is not a valid L2 message for broadcast", l2_msg )))?, } } -pub(crate) fn broadcast_l2_message(state: &Established, l2_msg: Message) -> Result<(), RLPxError> { +pub(crate) fn broadcast_l2_message( + state: &Established, + l2_msg: Message, +) -> Result<(), PeerConnectionError> { match l2_msg { msg @ Message::L2(L2Message::BatchSealed(_)) => { let task_id = tokio::task::id(); @@ -148,7 +153,7 @@ pub(crate) fn broadcast_l2_message(state: &Established, l2_msg: Message) -> Resu ); }) .map_err(|_| { - RLPxError::BroadcastError( + PeerConnectionError::BroadcastError( "Could not broadcast l2 message BatchSealed".to_owned(), ) })?; @@ -166,17 +171,21 @@ pub(crate) fn broadcast_l2_message(state: &Established, l2_msg: Message) -> Resu ); }) .map_err(|_| { - RLPxError::BroadcastError("Could not broadcast l2 message NewBlock".to_owned()) + PeerConnectionError::BroadcastError( + "Could not broadcast l2 message NewBlock".to_owned(), + ) })?; Ok(()) } - _ => 
Err(RLPxError::BroadcastError(format!( + _ => Err(PeerConnectionError::BroadcastError(format!( "Message {:?} is not a valid L2 message for broadcast", l2_msg ))), } } -pub(crate) async fn send_new_block(established: &mut Established) -> Result<(), RLPxError> { +pub(crate) async fn send_new_block( + established: &mut Established, +) -> Result<(), PeerConnectionError> { let latest_block_number = established.storage.get_latest_block_number().await?; let latest_block_sent = established .l2_state @@ -194,11 +203,11 @@ pub(crate) async fn send_new_block(established: &mut Established) -> Result<(), .storage .get_block_body(block_number) .await? - .ok_or(RLPxError::InternalError( + .ok_or(PeerConnectionError::InternalError( "Block body not found after querying for the block number".to_owned(), ))?; let new_block_header = established.storage.get_block_header(block_number)?.ok_or( - RLPxError::InternalError( + PeerConnectionError::InternalError( "Block header not found after querying for the block number".to_owned(), ), )?; @@ -221,7 +230,7 @@ pub(crate) async fn send_new_block(established: &mut Established) -> Result<(), .serialize_compact(); let recovery_id: u8 = Into::::into(recovery_id).try_into().map_err(|e| { - RLPxError::InternalError(format!( + PeerConnectionError::InternalError(format!( "Failed to convert recovery id to u8: {e}. This is a bug." )) })?; @@ -255,7 +264,7 @@ pub(crate) async fn send_new_block(established: &mut Established) -> Result<(), async fn should_process_new_block( established: &mut Established, msg: &NewBlock, -) -> Result { +) -> Result { let l2_state = established.l2_state.connection_state_mut()?; if !established.blockchain.is_synced() { debug!("Not processing new block, blockchain is not synced"); @@ -279,13 +288,15 @@ async fn should_process_new_block( let recovered_lead_sequencer = tokio::task::spawn_blocking(move || recover_address(msg_signature, block_hash)) .await - .map_err(|_| RLPxError::InternalError("Recover Address task failed".to_string()))? + .map_err(|_| { + PeerConnectionError::InternalError("Recover Address task failed".to_string()) + })? 
.map_err(|e| { log_peer_error( &established.node, &format!("Failed to recover lead sequencer: {e}"), ); - RLPxError::CryptographyError(e.to_string()) + PeerConnectionError::CryptographyError(e.to_string()) })?; if !validate_signature(recovered_lead_sequencer) { @@ -301,7 +312,7 @@ async fn should_process_new_block( async fn should_process_batch_sealed( established: &mut Established, msg: &BatchSealed, -) -> Result { +) -> Result { let l2_state = established.l2_state.connection_state_mut()?; if !established.blockchain.is_synced() { debug!("Not processing BatchSealedMessage, blockchain is not synced"); @@ -334,7 +345,7 @@ async fn should_process_batch_sealed( &established.node, &format!("Failed to recover lead sequencer: {e}"), ); - RLPxError::CryptographyError(e.to_string()) + PeerConnectionError::CryptographyError(e.to_string()) })?; if !validate_signature(recovered_lead_sequencer) { @@ -347,7 +358,10 @@ async fn should_process_batch_sealed( Ok(true) } -async fn process_new_block(established: &mut Established, msg: &NewBlock) -> Result<(), RLPxError> { +async fn process_new_block( + established: &mut Established, + msg: &NewBlock, +) -> Result<(), PeerConnectionError> { let l2_state = established.l2_state.connection_state_mut()?; l2_state .blocks_on_queue @@ -366,7 +380,7 @@ async fn process_new_block(established: &mut Established, msg: &NewBlock) -> Res let block_hash = block.hash(); let block_number = block.header.number; let block = Arc::::try_unwrap(block).map_err(|_| { - RLPxError::InternalError("Failed to take ownership of block".to_string()) + PeerConnectionError::InternalError("Failed to take ownership of block".to_string()) })?; established .blockchain @@ -385,7 +399,7 @@ async fn process_new_block(established: &mut Established, msg: &NewBlock) -> Res apply_fork_choice(&established.storage, block_hash, block_hash, block_hash) .await .map_err(|e| { - RLPxError::BlockchainError(ChainError::Custom(format!( + PeerConnectionError::BlockchainError(ChainError::Custom(format!( "Error adding new block {} with hash {:?}, error: {e}", block_number, block_hash ))) @@ -400,7 +414,9 @@ async fn process_new_block(established: &mut Established, msg: &NewBlock) -> Res Ok(()) } -pub(crate) async fn send_sealed_batch(established: &mut Established) -> Result<(), RLPxError> { +pub(crate) async fn send_sealed_batch( + established: &mut Established, +) -> Result<(), PeerConnectionError> { let batch_sealed_msg = { let l2_state = established.l2_state.connection_state_mut()?; let next_batch_to_send = l2_state.latest_batch_sent + 1; @@ -450,7 +466,7 @@ pub(crate) async fn send_sealed_batch(established: &mut Established) -> Result<( async fn process_batch_sealed( established: &mut Established, msg: &BatchSealed, -) -> Result<(), RLPxError> { +) -> Result<(), PeerConnectionError> { let l2_state = established.l2_state.connection_state_mut()?; l2_state.store_rollup.seal_batch(*msg.batch.clone()).await?; info!( diff --git a/crates/networking/p2p/rlpx/l2/messages.rs b/crates/networking/p2p/rlpx/l2/messages.rs index d082cb9b744..f1631b8bcd4 100644 --- a/crates/networking/p2p/rlpx/l2/messages.rs +++ b/crates/networking/p2p/rlpx/l2/messages.rs @@ -1,5 +1,5 @@ use crate::rlpx::{ - error::RLPxError, + error::PeerConnectionError, message::{Message, RLPxMessage}, utils::{snappy_compress, snappy_decompress}, }; @@ -60,13 +60,16 @@ pub struct BatchSealed { } impl BatchSealed { - pub fn from_batch_and_key(batch: Batch, secret_key: &SecretKey) -> Result { + pub fn from_batch_and_key( + batch: Batch, + secret_key: 
&SecretKey, + ) -> Result { let hash = batch_hash(&batch); let (recovery_id, signature) = secp256k1::SECP256K1 .sign_ecdsa_recoverable(&SecpMessage::from_digest(hash.into()), secret_key) .serialize_compact(); let recovery_id: u8 = Into::::into(recovery_id).try_into().map_err(|e| { - RLPxError::InternalError(format!( + PeerConnectionError::InternalError(format!( "Failed to convert recovery id to u8: {e}. This is a bug." )) })?; diff --git a/crates/networking/p2p/rlpx/message.rs b/crates/networking/p2p/rlpx/message.rs index 4372229297e..de020cb0cdb 100644 --- a/crates/networking/p2p/rlpx/message.rs +++ b/crates/networking/p2p/rlpx/message.rs @@ -282,6 +282,39 @@ impl Message { }, } } + + pub fn request_id(&self) -> Option { + match self { + Message::GetBlockHeaders(message) => Some(message.id), + Message::GetBlockBodies(message) => Some(message.id), + Message::GetPooledTransactions(message) => Some(message.id), + Message::GetReceipts(message) => Some(message.id), + Message::GetAccountRange(message) => Some(message.id), + Message::GetStorageRanges(message) => Some(message.id), + Message::GetByteCodes(message) => Some(message.id), + Message::GetTrieNodes(message) => Some(message.id), + Message::BlockHeaders(message) => Some(message.id), + Message::BlockBodies(message) => Some(message.id), + Message::PooledTransactions(message) => Some(message.id), + Message::Receipts68(message) => Some(message.id), + Message::Receipts69(message) => Some(message.id), + Message::AccountRange(message) => Some(message.id), + Message::StorageRanges(message) => Some(message.id), + Message::ByteCodes(message) => Some(message.id), + Message::TrieNodes(message) => Some(message.id), + // The rest of the message types does not have a request id. + Message::Hello(_) + | Message::Disconnect(_) + | Message::Ping(_) + | Message::Pong(_) + | Message::Status68(_) + | Message::Status69(_) + | Message::Transactions(_) + | Message::NewPooledTransactionHashes(_) + | Message::BlockRangeUpdate(_) + | Message::L2(_) => None, + } + } } impl Display for Message { diff --git a/crates/networking/p2p/snap.rs b/crates/networking/p2p/snap.rs index 10b7a189be6..e7897abfc92 100644 --- a/crates/networking/p2p/snap.rs +++ b/crates/networking/p2p/snap.rs @@ -3,7 +3,7 @@ use ethrex_rlp::encode::RLPEncode; use ethrex_storage::{Store, error::StoreError}; use crate::rlpx::{ - error::RLPxError, + error::PeerConnectionError, snap::{ AccountRange, AccountRangeUnit, AccountStateSlim, ByteCodes, GetAccountRange, GetByteCodes, GetStorageRanges, GetTrieNodes, StorageRanges, StorageSlot, TrieNodes, @@ -129,13 +129,13 @@ pub fn process_byte_codes_request( pub async fn process_trie_nodes_request( request: GetTrieNodes, store: Store, -) -> Result { +) -> Result { tokio::task::spawn_blocking(move || { let mut nodes = vec![]; let mut remaining_bytes = request.bytes; for paths in request.paths { if paths.is_empty() { - return Err(RLPxError::BadRequest( + return Err(PeerConnectionError::BadRequest( "zero-item pathset requested".to_string(), )); } diff --git a/crates/networking/p2p/sync.rs b/crates/networking/p2p/sync.rs index 2c87f82c3c6..2e4c3c4418a 100644 --- a/crates/networking/p2p/sync.rs +++ b/crates/networking/p2p/sync.rs @@ -815,17 +815,6 @@ impl SnapBlockSyncState { } } -/// Safety function that frees all peer and logs an error if we found freed peers when not expectig to -/// Logs with where the function was when it found this error -/// TODO: remove this function once peer table has moved to spawned implementation -async fn 
free_peers_and_log_if_not_empty(peer_handler: &mut PeerHandler) -> Result<(), SyncError> { - if peer_handler.peer_table.free_peers().await? != 0 { - let step = METRICS.current_step.get(); - error!("Found peers marked as used even though we just finished this step: step = {step}"); - }; - Ok(()) -} - impl Syncer { async fn snap_sync( &mut self, @@ -888,7 +877,6 @@ impl Syncer { block_sync_state, ) .await?; - free_peers_and_log_if_not_empty(&mut self.peers).await?; info!("Finish downloading account ranges from peers"); *METRICS.account_tries_insert_start_time.lock().await = Some(SystemTime::now()); @@ -949,7 +937,6 @@ impl Syncer { { continue; }; - free_peers_and_log_if_not_empty(&mut self.peers).await?; info!( "Started request_storage_ranges with {} accounts with storage root unchanged", @@ -988,7 +975,6 @@ impl Syncer { storage_accounts.accounts_with_storage_root.clear(); } - free_peers_and_log_if_not_empty(&mut self.peers).await?; info!( "Ended request_storage_ranges with {} accounts with storage root unchanged and not downloaded yet and with {} big/healed accounts", @@ -1068,8 +1054,6 @@ impl Syncer { &mut global_storage_leafs_healed, ) .await?; - - free_peers_and_log_if_not_empty(&mut self.peers).await?; } *METRICS.heal_end_time.lock().await = Some(SystemTime::now()); @@ -1237,7 +1221,7 @@ pub async fn update_pivot( block_number, block_timestamp, new_pivot_block_number ); loop { - let (peer_id, mut peer_channel) = peers + let (peer_id, mut connection) = peers .peer_table .get_best_peer(&SUPPORTED_ETH_CAPABILITIES) .await? @@ -1248,7 +1232,7 @@ pub async fn update_pivot( "Trying to update pivot to {new_pivot_block_number} with peer {peer_id} (score: {peer_score})" ); let Some(pivot) = peers - .get_block_header(&mut peer_channel, new_pivot_block_number) + .get_block_header(peer_id, &mut connection, new_pivot_block_number) .await .map_err(SyncError::PeerHandler)? 
else { @@ -1345,7 +1329,7 @@ pub enum SyncError { CodeHashesSnapshotsDirNotFound, #[error("Got different state roots for account hash: {0:?}, expected: {1:?}, computed: {2:?}")] DifferentStateRoots(H256, H256, H256), - #[error("We aren't finding get_peer_channel_with_retry")] + #[error("Cannot find suitable peer")] NoPeers, #[error("Failed to get block headers")] NoBlockHeaders, diff --git a/crates/networking/p2p/sync/state_healing.rs b/crates/networking/p2p/sync/state_healing.rs index 8a302a96b00..6cea48e6667 100644 --- a/crates/networking/p2p/sync/state_healing.rs +++ b/crates/networking/p2p/sync/state_healing.rs @@ -162,8 +162,6 @@ async fn heal_state_trie( } if let Ok((peer_id, response, batch)) = res { inflight_tasks -= 1; - // Mark the peer as available - peers.peer_table.free_peer(&peer_id).await?; match response { // If the peers responded with nodes, add them to the nodes_to_heal vector Ok(nodes) => { @@ -225,9 +223,9 @@ async fn heal_state_trie( .unwrap_or_default(), longest_path_seen, ); - let Some((peer_id, mut peer_channel)) = peers + let Some((peer_id, connection)) = peers .peer_table - .use_best_peer(&SUPPORTED_SNAP_CAPABILITIES) + .get_best_peer(&SUPPORTED_SNAP_CAPABILITIES) .await .inspect_err( |err| error!(err= ?err, "Error requesting a peer to perform state healing"), @@ -242,10 +240,13 @@ async fn heal_state_trie( let tx = task_sender.clone(); inflight_tasks += 1; + let peer_table = peers.peer_table.clone(); tokio::spawn(async move { // TODO: check errors to determine whether the current block is stale let response = PeerHandler::request_state_trienodes( - &mut peer_channel, + peer_id, + connection, + peer_table, state_root, batch.clone(), ) diff --git a/crates/networking/p2p/sync/storage_healing.rs b/crates/networking/p2p/sync/storage_healing.rs index dccf25dab7d..3966f66e16d 100644 --- a/crates/networking/p2p/sync/storage_healing.rs +++ b/crates/networking/p2p/sync/storage_healing.rs @@ -279,7 +279,7 @@ pub async fn heal_storage_trie( ) .expect("We shouldn't be getting store errors"); // TODO: if we have a stor error we should stop } - Err(RequestStorageTrieNodes::SendMessageError(id, _err)) => { + Err(RequestStorageTrieNodes::RequestError(id, _err)) => { let inflight_request = state.requests.remove(&id).expect("request disappeared"); state.failed_downloads += 1; state @@ -287,7 +287,7 @@ pub async fn heal_storage_trie( .extend(inflight_request.requests.clone()); peers .peer_table - .free_with_failure(&inflight_request.peer_id) + .record_failure(&inflight_request.peer_id) .await?; } } @@ -306,9 +306,9 @@ async fn ask_peers_for_nodes( task_sender: &Sender>, ) { if (requests.len() as u32) < MAX_IN_FLIGHT_REQUESTS && !download_queue.is_empty() { - let Some((peer_id, mut peer_channel)) = peers + let Some((peer_id, connection)) = peers .peer_table - .use_best_peer(&SUPPORTED_SNAP_CAPABILITIES) + .get_best_peer(&SUPPORTED_SNAP_CAPABILITIES) .await .inspect_err( |err| error!(err= ?err, "Error requesting a peer to perform storage healing"), @@ -340,10 +340,13 @@ async fn ask_peers_for_nodes( let tx = task_sender.clone(); + let peer_table = peers.peer_table.clone(); + requests_task_joinset.spawn(async move { let req_id = gtn.id; // TODO: check errors to determine whether the current block is stale - let response = PeerHandler::request_storage_trienodes(&mut peer_channel, gtn).await; + let response = + PeerHandler::request_storage_trienodes(peer_id, connection, peer_table, gtn).await; // TODO: add error handling tx.try_send(response).inspect_err(|err| { error!("Failed to send 
state trie nodes response. Error: {err}") @@ -402,7 +405,6 @@ async fn zip_requeue_node_responses_score_peer( info!("No matching request found for received response {trie_nodes:?}"); return Ok(None); }; - peer_handler.peer_table.free_peer(&request.peer_id).await?; let nodes_size = trie_nodes.nodes.len(); if nodes_size == 0 { diff --git a/crates/networking/p2p/tx_broadcaster.rs b/crates/networking/p2p/tx_broadcaster.rs index a07ad66e7b1..eeb1799510f 100644 --- a/crates/networking/p2p/tx_broadcaster.rs +++ b/crates/networking/p2p/tx_broadcaster.rs @@ -16,10 +16,10 @@ use spawned_concurrency::{ use tracing::{debug, error, info}; use crate::{ - discv4::peer_table::{PeerChannels, PeerTableError, PeerTableHandle}, + discv4::peer_table::{PeerTable, PeerTableError}, rlpx::{ Message, - connection::server::CastMessage, + connection::server::PeerConnection, eth::transactions::{NewPooledTransactionHashes, Transactions}, p2p::{Capability, SUPPORTED_ETH_CAPABILITIES}, }, @@ -89,7 +89,7 @@ impl Default for BroadcastRecord { #[derive(Debug, Clone)] pub struct TxBroadcaster { - peer_table: PeerTableHandle, + peer_table: PeerTable, blockchain: Arc, // tx_hash -> broadcast record (which peers know it and when it was last sent) known_txs: HashMap, @@ -113,7 +113,7 @@ pub enum OutMessage { impl TxBroadcaster { pub async fn spawn( - kademlia: PeerTableHandle, + kademlia: PeerTable, blockchain: Arc, ) -> Result, TxBroadcasterError> { info!("Starting Transaction Broadcaster"); @@ -207,7 +207,7 @@ impl TxBroadcaster { let (peers_to_send_full_txs, peers_to_send_hashes) = shuffled_peers.split_at(peer_sqrt.ceil() as usize); - for (peer_id, mut peer_channels, capabilities) in peers_to_send_full_txs.iter().cloned() { + for (peer_id, mut connection, capabilities) in peers_to_send_full_txs.iter().cloned() { let peer_idx = self.peer_index(peer_id); let txs_to_send = full_txs .iter() @@ -225,20 +225,18 @@ impl TxBroadcaster { let txs_message = Message::Transactions(Transactions { transactions: txs_to_send, }); - peer_channels.connection.cast(CastMessage::BackendMessage( - txs_message, - )).await.unwrap_or_else(|err| { + connection.outgoing_message(txs_message).await.unwrap_or_else(|err| { error!(peer_id = %format!("{:#x}", peer_id), err = ?err, "Failed to send transactions"); }); - self.send_tx_hashes(blob_txs.clone(), capabilities, &mut peer_channels, peer_id) + self.send_tx_hashes(blob_txs.clone(), capabilities, &mut connection, peer_id) .await?; } - for (peer_id, mut peer_channels, capabilities) in peers_to_send_hashes.iter().cloned() { + for (peer_id, mut connection, capabilities) in peers_to_send_hashes.iter().cloned() { // If a peer is not selected to receive the full transactions, we only send the hashes of all transactions (including blob transactions) self.send_tx_hashes( txs_to_broadcast.clone(), capabilities, - &mut peer_channels, + &mut connection, peer_id, ) .await?; @@ -251,7 +249,7 @@ impl TxBroadcaster { &mut self, txs: Vec, capabilities: Vec, - peer_channels: &mut PeerChannels, + connection: &mut PeerConnection, peer_id: H256, ) -> Result<(), TxBroadcasterError> { let peer_idx = self.peer_index(peer_id); @@ -270,7 +268,7 @@ impl TxBroadcaster { send_tx_hashes( txs_to_send, capabilities, - peer_channels, + connection, peer_id, &self.blockchain, ) @@ -281,7 +279,7 @@ impl TxBroadcaster { pub async fn send_tx_hashes( txs: Vec, capabilities: Vec, - peer_channels: &mut PeerChannels, + connection: &mut PeerConnection, peer_id: H256, blockchain: &Arc, ) -> Result<(), TxBroadcasterError> { @@ -298,11 +296,9 @@ pub 
async fn send_tx_hashes( let hashes_message = Message::NewPooledTransactionHashes( NewPooledTransactionHashes::new(txs_to_send, blockchain)?, ); - peer_channels.connection.cast(CastMessage::BackendMessage( - hashes_message.clone(), - )).await.unwrap_or_else(|err| { - error!(peer_id = %format!("{:#x}", peer_id), err = ?err, "Failed to send transactions hashes"); - }); + connection.outgoing_message(hashes_message.clone()).await.unwrap_or_else(|err| { + error!(peer_id = %format!("{:#x}", peer_id), err = ?err, "Failed to send transactions hashes"); + }); } } Ok(()) diff --git a/crates/networking/p2p/types.rs b/crates/networking/p2p/types.rs index fa86145dcaa..1eb1c29357e 100644 --- a/crates/networking/p2p/types.rs +++ b/crates/networking/p2p/types.rs @@ -243,7 +243,9 @@ impl Display for Node { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.write_str(&format!( "{0}({1}:{2})", - self.public_key, self.ip, self.tcp_port + self.node_id(), + self.ip, + self.tcp_port )) } } diff --git a/crates/networking/p2p/utils.rs b/crates/networking/p2p/utils.rs index a4f97310c03..9b6ce315988 100644 --- a/crates/networking/p2p/utils.rs +++ b/crates/networking/p2p/utils.rs @@ -1,20 +1,11 @@ +use crate::peer_handler::DumpError; +use ethrex_common::{H256, H512, U256, types::AccountState, utils::keccak}; +use ethrex_rlp::encode::RLPEncode; +use secp256k1::{PublicKey, SecretKey}; use std::{ path::{Path, PathBuf}, time::{Duration, SystemTime, UNIX_EPOCH}, }; - -use ethrex_common::utils::keccak; -use ethrex_common::{H256, H512, U256, types::AccountState}; -use ethrex_rlp::{encode::RLPEncode, error::RLPDecodeError}; -use ethrex_trie::Node; -use secp256k1::{PublicKey, SecretKey}; -use spawned_concurrency::error::GenServerError; - -use crate::peer_handler::DumpError; -use crate::{ - discv4::peer_table::PeerChannels, - rlpx::{Message, connection::server::CastMessage, snap::TrieNodes}, -}; use tracing::error; /// Computes the node_id from a public key (aka computes the Keccak256 hash of the given public key) @@ -142,7 +133,7 @@ pub fn get_code_hashes_snapshot_file(directory: &Path, chunk_index: u64) -> Path pub fn dump_to_file(path: &Path, contents: Vec) -> Result<(), DumpError> { std::fs::write(path, &contents) - .inspect_err(|err| tracing::error!(%err, ?path, "Failed to dump snapshot to file")) + .inspect_err(|err| error!(%err, ?path, "Failed to dump snapshot to file")) .map_err(|err| DumpError { path: path.to_path_buf(), contents, @@ -216,87 +207,3 @@ pub fn dump_storages_to_file( .encode_to_vec(), ) } - -/// TODO: make it more generic -pub async fn send_message_and_wait_for_response( - peer_channel: &mut PeerChannels, - message: Message, - request_id: u64, -) -> Result, SendMessageError> { - let receiver = peer_channel - .receiver - .try_lock() - .map_err(|_| SendMessageError::PeerBusy)?; - peer_channel - .connection - .cast(CastMessage::BackendMessage(message)) - .await - .map_err(SendMessageError::GenServerError)?; - let nodes = tokio::time::timeout( - Duration::from_secs(7), - receive_trienodes(receiver, request_id), - ) - .await - .map_err(|_| SendMessageError::PeerTimeout)? 
- .ok_or(SendMessageError::PeerDisconnected)?; - - nodes - .nodes - .iter() - .map(|node| Node::decode_raw(node)) - .collect::, _>>() - .map_err(SendMessageError::RLPDecodeError) -} - -/// TODO: make it more generic -pub async fn send_trie_nodes_messages_and_wait_for_reply( - peer_channel: &mut PeerChannels, - message: Message, - request_id: u64, -) -> Result { - let receiver = peer_channel - .receiver - .try_lock() - .map_err(|_| SendMessageError::PeerBusy)?; - peer_channel - .connection - .cast(CastMessage::BackendMessage(message)) - .await - .map_err(SendMessageError::GenServerError)?; - tokio::time::timeout( - Duration::from_secs(7), - receive_trienodes(receiver, request_id), - ) - .await - .map_err(|_| SendMessageError::PeerTimeout)? - .ok_or(SendMessageError::PeerDisconnected) -} - -async fn receive_trienodes( - mut receiver: tokio::sync::MutexGuard<'_, spawned_rt::tasks::mpsc::Receiver>, - request_id: u64, -) -> Option { - loop { - let resp = receiver.recv().await?; - if let Message::TrieNodes(trie_nodes) = resp - && trie_nodes.id == request_id - { - return Some(trie_nodes); - } - } -} - -// TODO: find a better name for this type -#[derive(thiserror::Error, Debug)] -pub enum SendMessageError { - #[error("Peer timed out")] - PeerTimeout, - #[error("GenServerError")] - GenServerError(GenServerError), - #[error("Peer disconnected")] - PeerDisconnected, - #[error("Peer Busy")] - PeerBusy, - #[error("RLP decode error")] - RLPDecodeError(RLPDecodeError), -}
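
The request/response flow introduced by PeerConnection::outgoing_request boils down to a simple pattern: each outgoing request registers a oneshot sender under its request id (the current_requests map), the caller races the matching receiver against a timeout, and a timeout notification removes the stale entry so late replies are dropped. The following is a minimal, self-contained sketch of that pattern using plain tokio primitives; the names (Pending, Msg, wait_for_response) are illustrative placeholders, not the ethrex API.

    use std::collections::HashMap;
    use std::time::Duration;
    use tokio::sync::oneshot;

    // Hypothetical stand-ins for the real Message and error types.
    #[derive(Debug)]
    struct Msg { id: u64, body: String }

    #[derive(Debug)]
    enum ReqError { Timeout, Recv(String) }

    // Server-side state: pending requests keyed by request id,
    // analogous to `current_requests` in the diff above.
    #[derive(Default)]
    struct Pending(HashMap<u64, oneshot::Sender<Msg>>);

    impl Pending {
        // Register an outgoing request and hand the receiving half to the caller.
        fn register(&mut self, id: u64) -> oneshot::Receiver<Msg> {
            let (tx, rx) = oneshot::channel();
            self.0.insert(id, tx);
            rx
        }
        // Route an incoming response to whoever is waiting on its request id.
        fn resolve(&mut self, response: Msg) {
            if let Some(tx) = self.0.remove(&response.id) {
                let _ = tx.send(response);
            }
        }
        // Drop a request that timed out so a late reply is simply discarded.
        fn timeout(&mut self, id: u64) {
            self.0.remove(&id);
        }
    }

    // Caller side: wait for the response or give up after `timeout`.
    async fn wait_for_response(
        rx: oneshot::Receiver<Msg>,
        timeout: Duration,
    ) -> Result<Msg, ReqError> {
        match tokio::time::timeout(timeout, rx).await {
            Ok(Ok(msg)) => Ok(msg),
            Ok(Err(e)) => Err(ReqError::Recv(e.to_string())),
            Err(_) => Err(ReqError::Timeout),
        }
    }

    #[tokio::main]
    async fn main() {
        let mut pending = Pending::default();

        // A request that gets answered before the deadline.
        let rx = pending.register(7);
        pending.resolve(Msg { id: 7, body: "BlockHeaders".into() });
        let ok = wait_for_response(rx, Duration::from_secs(5)).await;
        println!("{ok:?}"); // Ok(Msg { id: 7, body: "BlockHeaders" })

        // A request that never gets an answer hits the timeout path instead.
        let rx2 = pending.register(8);
        let timed_out = wait_for_response(rx2, Duration::from_millis(10)).await;
        pending.timeout(8); // drop the stale entry, mirroring CastMessage::RequestTimeout
        println!("{timed_out:?}"); // Err(Timeout)
    }

Keeping the pending map inside the connection task (as the diff does with current_requests in the GenServer state) avoids shared locks: only that task inserts and removes entries, while callers just await their oneshot receiver.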