Merged
8 changes: 0 additions & 8 deletions cmd/ethrex/utils.rs
@@ -7,7 +6,6 @@ use ethrex_p2p::{
sync::SyncMode,
types::{Node, NodeRecord},
};
use ethrex_rlp::decode::RLPDecode;
use hex::FromHexError;
use secp256k1::{PublicKey, SecretKey};
use serde::{Deserialize, Serialize};
@@ -65,13 +64,6 @@ pub fn read_chain_file(chain_rlp_path: &str) -> Vec<Block> {
decode::chain_file(chain_file).expect("Failed to decode chain rlp file")
}

pub fn read_block_file(block_file_path: &str) -> Block {
let encoded_block = std::fs::read(block_file_path)
.unwrap_or_else(|_| panic!("Failed to read block file with path {block_file_path}"));
Block::decode(&encoded_block)
.unwrap_or_else(|_| panic!("Failed to decode block file {block_file_path}"))
}
Comment on lines -68 to -73
Contributor


I thought the import command used this.

Contributor Author


This is done by read_chain_file, which is almost identical.
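
For illustration only (not part of this PR): a minimal sketch of how a caller that relied on the removed read_block_file could go through read_chain_file instead. The helper name read_single_block is hypothetical, and it assumes a single-block RLP file decodes the same way as a one-entry chain file, which the PR does not state explicitly.

use ethrex_common::types::Block; // likely already in scope in cmd/ethrex/utils.rs

// Hypothetical replacement: reuse read_chain_file (defined above in this module)
// and take the first decoded block, assuming the file holds exactly one block.
pub fn read_single_block(block_file_path: &str) -> Block {
    read_chain_file(block_file_path)
        .into_iter()
        .next()
        .unwrap_or_else(|| panic!("No block found in file {block_file_path}"))
}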


pub fn parse_sync_mode(s: &str) -> eyre::Result<SyncMode> {
match s {
"full" => Ok(SyncMode::Full),
27 changes: 0 additions & 27 deletions crates/common/serde_utils.rs
@@ -5,7 +5,6 @@ use serde::{Deserialize, Deserializer, Serializer, de::Error, ser::SerializeSeq}
pub mod u256 {
use super::*;
use ethereum_types::U256;
use serde_json::Number;

pub mod dec_str {
use super::*;
@@ -25,32 +24,6 @@ pub mod u256 {
}
}

pub fn deser_number<'de, D>(d: D) -> Result<U256, D::Error>
where
D: Deserializer<'de>,
{
let value = Number::deserialize(d)?.to_string();
U256::from_dec_str(&value).map_err(|e| D::Error::custom(e.to_string()))
}

pub fn deser_number_opt<'de, D>(d: D) -> Result<Option<U256>, D::Error>
where
D: Deserializer<'de>,
{
// Handle the null case explicitly
let opt = Option::<Number>::deserialize(d)?;
match opt {
Some(number) => {
// Convert number to string and parse to U256
let value = number.to_string();
U256::from_dec_str(&value)
.map(Some)
.map_err(|e| D::Error::custom(e.to_string()))
}
None => Ok(None),
}
}

pub fn deser_hex_str<'de, D>(d: D) -> Result<U256, D::Error>
where
D: Deserializer<'de>,
71 changes: 1 addition & 70 deletions crates/common/types/block_execution_witness.rs
@@ -1,24 +1,18 @@
use std::collections::{BTreeMap, BTreeSet};
use std::fmt;
use std::str::FromStr;

use crate::rkyv_utils::H160Wrapper;
use crate::types::{Block, Code};
use crate::{
constants::EMPTY_KECCACK_HASH,
types::{AccountState, AccountUpdate, BlockHeader, ChainConfig},
utils::decode_hex,
};
use bytes::Bytes;
use ethereum_types::{Address, H256, U256};
use ethrex_crypto::keccak::keccak_hash;
use ethrex_rlp::error::RLPDecodeError;
use ethrex_rlp::{decode::RLPDecode, encode::RLPEncode};
use ethrex_trie::{EMPTY_TRIE_HASH, Node, Trie, TrieError};
use rkyv::with::{Identity, MapKV};
use serde::de::{SeqAccess, Visitor};
use serde::ser::SerializeSeq;
use serde::{Deserialize, Deserializer, Serialize, Serializer, de};
use serde::{Deserialize, Serialize};

/// State produced by the guest program execution inside the zkVM. It is
/// essentially built from the `ExecutionWitness`.
@@ -460,69 +454,6 @@ impl GuestProgramState {
}
}

pub fn serialize_code<S>(map: &BTreeMap<H256, Bytes>, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut seq_serializer = serializer.serialize_seq(Some(map.len()))?;
for (code_hash, code) in map {
let code_hash = format!("0x{}", hex::encode(code_hash));
let code = format!("0x{}", hex::encode(code));

let mut obj = serde_json::Map::new();
obj.insert(code_hash, serde_json::Value::String(code));

seq_serializer.serialize_element(&obj)?;
}
seq_serializer.end()
}

pub fn deserialize_code<'de, D>(deserializer: D) -> Result<BTreeMap<H256, Bytes>, D::Error>
where
D: Deserializer<'de>,
{
struct BytesVecVisitor;

impl<'de> Visitor<'de> for BytesVecVisitor {
type Value = BTreeMap<H256, Bytes>;

fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("a list of hex-encoded strings")
}

fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
where
A: SeqAccess<'de>,
{
let mut map = BTreeMap::new();

#[derive(Deserialize)]
struct CodeEntry(BTreeMap<String, String>);

while let Some(CodeEntry(entry)) = seq.next_element::<CodeEntry>()? {
if entry.len() != 1 {
return Err(de::Error::custom(
"Each object must contain exactly one key",
));
}

for (k, v) in entry {
let code_hash =
H256::from_str(k.trim_start_matches("0x")).map_err(de::Error::custom)?;

let bytecode =
decode_hex(v.trim_start_matches("0x")).map_err(de::Error::custom)?;

map.insert(code_hash, Bytes::from(bytecode));
}
}
Ok(map)
}
}

deserializer.deserialize_seq(BytesVecVisitor)
}

fn hash_address(address: &Address) -> Vec<u8> {
keccak_hash(address.to_fixed_bytes()).to_vec()
}
46 changes: 1 addition & 45 deletions crates/common/types/l2/fee_config.rs
@@ -1,5 +1,5 @@
use bytes::Bytes;
use ethereum_types::{Address, H256, U256};
use ethereum_types::Address;
use rkyv::{Archive, Deserialize as RDeserialize, Serialize as RSerialize};
use serde::{Deserialize, Serialize};

@@ -216,24 +216,6 @@ impl Decoder {
Ok(res)
}

pub fn get_u256(&mut self) -> Result<U256, DecoderError> {
let res = U256::from_big_endian(self.bytes.get(self.offset..self.offset + 32).ok_or(
DecoderError::FailedToDeserializeStateDiff("Not enough bytes".to_string()),
)?);
self.offset += 32;

Ok(res)
}

pub fn get_h256(&mut self) -> Result<H256, DecoderError> {
let res = H256::from_slice(self.bytes.get(self.offset..self.offset + 32).ok_or(
DecoderError::FailedToDeserializeStateDiff("Not enough bytes".to_string()),
)?);
self.offset += 32;

Ok(res)
}

pub fn get_u8(&mut self) -> Result<u8, DecoderError> {
let res = self
.bytes
@@ -246,23 +228,6 @@
Ok(*res)
}

pub fn get_u16(&mut self) -> Result<u16, DecoderError> {
let res = u16::from_be_bytes(
self.bytes
.get(self.offset..self.offset + 2)
.ok_or(DecoderError::FailedToDeserializeStateDiff(
"Not enough bytes".to_string(),
))?
.try_into()
.map_err(|_| {
DecoderError::FailedToDeserializeStateDiff("Cannot parse u16".to_string())
})?,
);
self.offset += 2;

Ok(res)
}

pub fn get_u64(&mut self) -> Result<u64, DecoderError> {
let res = u64::from_be_bytes(
self.bytes
Expand All @@ -279,13 +244,4 @@ impl Decoder {

Ok(res)
}

pub fn get_bytes(&mut self, size: usize) -> Result<Bytes, DecoderError> {
let res = self.bytes.get(self.offset..self.offset + size).ok_or(
DecoderError::FailedToDeserializeStateDiff("Not enough bytes".to_string()),
)?;
self.offset += size;

Ok(Bytes::copy_from_slice(res))
}
}
5 changes: 0 additions & 5 deletions crates/common/types/transaction.rs
@@ -603,11 +603,6 @@ impl EIP4844Transaction {
self.rlp_encode_as_pooled_tx(&mut buf, blobs_bundle);
buf.len()
}
pub fn rlp_encode_as_pooled_tx_to_vec(&self, blobs_bundle: &BlobsBundle) -> Vec<u8> {
let mut buf = Vec::new();
self.rlp_encode_as_pooled_tx(&mut buf, blobs_bundle);
buf
}
}

impl RLPEncode for EIP7702Transaction {
7 changes: 0 additions & 7 deletions crates/l2/common/src/l1_messages.rs
@@ -44,13 +44,6 @@ pub fn get_l1_message_hash(msg: &L1Message) -> H256 {
keccak(msg.encode())
}

pub fn get_block_l1_message_hashes(receipts: &[Receipt]) -> Vec<H256> {
get_block_l1_messages(receipts)
.iter()
.map(get_l1_message_hash)
.collect()
}

pub fn get_block_l1_messages(receipts: &[Receipt]) -> Vec<L1Message> {
static L1MESSAGE_EVENT_SELECTOR: LazyLock<H256> =
LazyLock::new(|| keccak("L1Message(address,bytes32,uint256)".as_bytes()));
51 changes: 4 additions & 47 deletions crates/networking/p2p/peer_handler.rs
@@ -5,12 +5,9 @@ use crate::{
rlpx::{
connection::server::PeerConnection,
error::PeerConnectionError,
eth::{
blocks::{
BLOCK_HEADER_LIMIT, BlockBodies, BlockHeaders, GetBlockBodies, GetBlockHeaders,
HashOrNumber,
},
receipts::GetReceipts,
eth::blocks::{
BLOCK_HEADER_LIMIT, BlockBodies, BlockHeaders, GetBlockBodies, GetBlockHeaders,
HashOrNumber,
},
message::Message as RLPxMessage,
p2p::{Capability, SUPPORTED_ETH_CAPABILITIES},
Expand All @@ -29,7 +26,7 @@ use crate::{
use bytes::Bytes;
use ethrex_common::{
BigEndianHash, H256, U256,
types::{AccountState, BlockBody, BlockHeader, Receipt, validate_block_body},
types::{AccountState, BlockBody, BlockHeader, validate_block_body},
};
use ethrex_rlp::{decode::RLPDecode, encode::RLPEncode};
use ethrex_storage::Store;
@@ -596,46 +593,6 @@ impl PeerHandler {
Ok(None)
}

/// Requests all receipts in a set of blocks from any suitable peer given their block hashes
/// Returns the lists of receipts or None if:
/// - There are no available peers (the node just started up or was rejected by all other nodes)
/// - No peer returned a valid response in the given time and retry limits
pub async fn request_receipts(
&mut self,
block_hashes: Vec<H256>,
) -> Result<Option<Vec<Vec<Receipt>>>, PeerHandlerError> {
let block_hashes_len = block_hashes.len();
for _ in 0..REQUEST_RETRY_ATTEMPTS {
let request_id = rand::random();
let request = RLPxMessage::GetReceipts(GetReceipts {
id: request_id,
block_hashes: block_hashes.clone(),
});
match self.get_random_peer(&SUPPORTED_ETH_CAPABILITIES).await? {
None => return Ok(None),
Some((peer_id, mut connection)) => {
if let Some(receipts) =
match PeerHandler::make_request(&mut self.peer_table, peer_id, &mut connection, request, PEER_REPLY_TIMEOUT).await {
Ok(RLPxMessage::Receipts68(res)) => {
Some(res.get_receipts())
}
Ok(RLPxMessage::Receipts69(res)) => {
Some(res.receipts.clone())
}
_ => None
}
.and_then(|receipts|
// Check that the response is not empty and does not contain more bodies than the ones requested
(!receipts.is_empty() && receipts.len() <= block_hashes_len).then_some(receipts))
{
return Ok(Some(receipts));
}
}
}
}
Ok(None)
}

/// Requests an account range from any suitable peer given the state trie's root and the starting hash and the limit hash.
/// Will also return a boolean indicating if there is more state to be fetched towards the right of the trie
/// (Note that the boolean will be true even if the remaining state is outside the boundary set by the limit hash)
13 changes: 0 additions & 13 deletions crates/networking/p2p/rlpx/connection/codec.rs
@@ -239,19 +239,6 @@ impl Decoder for RLPxCodec {
)?))
}

fn decode_eof(&mut self, buf: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
match self.decode(buf)? {
Some(frame) => Ok(Some(frame)),
None => {
if buf.is_empty() {
Ok(None)
} else {
Err(std::io::Error::other("bytes remaining on stream").into())
}
}
}
}

fn framed<S: AsyncRead + AsyncWrite + Sized>(self, io: S) -> Framed<S, Self>
where
Self: Sized,
27 changes: 1 addition & 26 deletions crates/networking/p2p/sync.rs
@@ -23,7 +23,7 @@ use ethrex_common::types::Code;
use ethrex_common::{
H256,
constants::{EMPTY_KECCACK_HASH, EMPTY_TRIE_HASH},
types::{AccountState, Block, BlockHash, BlockHeader},
types::{AccountState, Block, BlockHeader},
};
use ethrex_rlp::{decode::RLPDecode, encode::RLPEncode, error::RLPDecodeError};
use ethrex_storage::{Store, error::StoreError};
@@ -585,31 +585,6 @@ async fn store_block_bodies(
Ok(())
}

/// Fetches all receipts for the given block hashes via p2p and stores them
// TODO: remove allow when used again
#[allow(unused)]
async fn store_receipts(
mut block_hashes: Vec<BlockHash>,
mut peers: PeerHandler,
store: Store,
) -> Result<(), SyncError> {
loop {
debug!("Requesting Receipts ");
if let Some(receipts) = peers.request_receipts(block_hashes.clone()).await? {
debug!(" Received {} Receipts", receipts.len());
// Track which blocks we have already fetched receipts for
for (block_hash, receipts) in block_hashes.drain(0..receipts.len()).zip(receipts) {
store.add_receipts(block_hash, receipts).await?;
}
// Check if we need to ask for another batch
if block_hashes.is_empty() {
break;
}
}
}
Ok(())
}

Comment on lines -588 to -612
Contributor


This would be used when we address #1766; maybe we should leave it and link the issue?

Contributor


Is #1766 going to be addressed soon? While that issue is not being addressed, I'd prefer to mention this thread in the issue and remove the code for now.

/// Persisted State during the Block Sync phase for SnapSync
#[derive(Clone)]
pub struct SnapBlockSyncState {
Expand Down