diff --git a/CHANGELOG.md b/CHANGELOG.md index 1b1338c18ae..340df4fa39b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,7 +3,7 @@ ## Perf ### 2025-10-31 - +- Reduce overhead of trie opening [#5145](https://github.com/lambdaclass/ethrex/pull/5145) - Improved discovery and peer initialization [#5147](https://github.com/lambdaclass/ethrex/pull/5147) ### 2025-10-30 diff --git a/crates/blockchain/blockchain.rs b/crates/blockchain/blockchain.rs index 2757b13799c..6fe402f0c88 100644 --- a/crates/blockchain/blockchain.rs +++ b/crates/blockchain/blockchain.rs @@ -165,7 +165,7 @@ impl Blockchain { // Validate the block pre-execution validate_block(block, &parent_header, &chain_config, ELASTICITY_MULTIPLIER)?; - let vm_db = StoreVmDatabase::new(self.storage.clone(), block.header.parent_hash); + let vm_db = StoreVmDatabase::new(self.storage.clone(), parent_header); let mut vm = self.new_evm(vm_db)?; let execution_result = vm.execute_block(block)?; @@ -208,7 +208,7 @@ impl Blockchain { validate_block(block, &parent_header, &chain_config, ELASTICITY_MULTIPLIER)?; let block_validated_instant = Instant::now(); - let vm_db = StoreVmDatabase::new(self.storage.clone(), block.header.parent_hash); + let vm_db = StoreVmDatabase::new(self.storage.clone(), parent_header.clone()); let mut vm = self.new_evm(vm_db)?; let exec_merkle_start = Instant::now(); @@ -532,6 +532,11 @@ impl Blockchain { for (i, block) in blocks.iter().enumerate() { let parent_hash = block.header.parent_hash; + let parent_header = self + .storage + .get_block_header_by_hash(parent_hash) + .map_err(ChainError::StoreError)? + .ok_or(ChainError::ParentNotFound)?; // This assumes that the user has the necessary state stored already, // so if the user only has the state previous to the first block, it @@ -539,7 +544,7 @@ impl Blockchain { // doesn't fail, later in this function we store the new state after // re-execution. let vm_db: DynVmDatabase = - Box::new(StoreVmDatabase::new(self.storage.clone(), parent_hash)); + Box::new(StoreVmDatabase::new(self.storage.clone(), parent_header)); let logger = Arc::new(DatabaseLogger::new(Arc::new(Mutex::new(Box::new(vm_db))))); @@ -1007,9 +1012,14 @@ impl Blockchain { // Cache block hashes for the full batch so we can access them during execution without having to store the blocks beforehand let block_hash_cache = blocks.iter().map(|b| (b.header.number, b.hash())).collect(); + let parent_header = self + .storage + .get_block_header_by_hash(first_block_header.parent_hash) + .map_err(|e| (ChainError::StoreError(e), None))? + .ok_or((ChainError::ParentNotFound, None))?; let vm_db = StoreVmDatabase::new_with_block_hash_cache( self.storage.clone(), - first_block_header.parent_hash, + parent_header, block_hash_cache, ); let mut vm = self.new_evm(vm_db).map_err(|e| (e.into(), None))?; diff --git a/crates/blockchain/payload.rs b/crates/blockchain/payload.rs index 8a5fafab377..f054584e13c 100644 --- a/crates/blockchain/payload.rs +++ b/crates/blockchain/payload.rs @@ -235,7 +235,11 @@ impl PayloadBuildContext { .unwrap_or_default(), ); - let vm_db = StoreVmDatabase::new(storage.clone(), payload.header.parent_hash); + let parent_header = storage + .get_block_header_by_hash(payload.header.parent_hash) + .map_err(|e| EvmError::DB(e.to_string()))? 
+ .ok_or_else(|| EvmError::DB("parent header not found".to_string()))?; + let vm_db = StoreVmDatabase::new(storage.clone(), parent_header); let vm = new_evm(blockchain_type, vm_db)?; let payload_size = payload.encode_to_vec().len() as u64; diff --git a/crates/blockchain/tracing.rs b/crates/blockchain/tracing.rs index 445b98ffe84..5f049ce183b 100644 --- a/crates/blockchain/tracing.rs +++ b/crates/blockchain/tracing.rs @@ -102,9 +102,13 @@ impl Blockchain { .iter() .map(|b| (b.header.number, b.hash())) .collect(); + let parent_header = self + .storage + .get_block_header_by_hash(parent_hash)? + .ok_or(ChainError::ParentNotFound)?; let vm_db = StoreVmDatabase::new_with_block_hash_cache( self.storage.clone(), - parent_hash, + parent_header, block_hash_cache, ); let mut vm = self.new_evm(vm_db)?; diff --git a/crates/blockchain/vm.rs b/crates/blockchain/vm.rs index 6a5b90cb28e..7a5def34969 100644 --- a/crates/blockchain/vm.rs +++ b/crates/blockchain/vm.rs @@ -1,7 +1,7 @@ use ethrex_common::{ Address, H256, U256, constants::EMPTY_KECCACK_HASH, - types::{AccountState, BlockHash, BlockNumber, ChainConfig, Code}, + types::{AccountState, BlockHash, BlockHeader, BlockNumber, ChainConfig, Code}, }; use ethrex_storage::Store; use ethrex_vm::{EvmError, VmDatabase}; @@ -16,26 +16,29 @@ pub struct StoreVmDatabase { // We use this when executing blocks in batches, as we will only add the blocks at the end // And may need to access hashes of blocks previously executed in the batch pub block_hash_cache: HashMap, + pub state_root: H256, } impl StoreVmDatabase { - pub fn new(store: Store, block_hash: BlockHash) -> Self { + pub fn new(store: Store, block_header: BlockHeader) -> Self { StoreVmDatabase { store, - block_hash, + block_hash: block_header.hash(), block_hash_cache: HashMap::new(), + state_root: block_header.state_root, } } pub fn new_with_block_hash_cache( store: Store, - block_hash: BlockHash, + block_header: BlockHeader, block_hash_cache: HashMap, ) -> Self { StoreVmDatabase { store, - block_hash, + block_hash: block_header.hash(), block_hash_cache, + state_root: block_header.state_root, } } } @@ -44,14 +47,14 @@ impl VmDatabase for StoreVmDatabase { #[instrument(level = "trace", name = "Account read", skip_all)] fn get_account_state(&self, address: Address) -> Result, EvmError> { self.store - .get_account_state_by_hash(self.block_hash, address) + .get_account_state_by_root(self.state_root, address) .map_err(|e| EvmError::DB(e.to_string())) } #[instrument(level = "trace", name = "Storage read", skip_all)] fn get_storage_slot(&self, address: Address, key: H256) -> Result, EvmError> { self.store - .get_storage_at_hash(self.block_hash, address, key) + .get_storage_at_root(self.state_root, address, key) .map_err(|e| EvmError::DB(e.to_string())) } diff --git a/crates/l2/based/block_fetcher.rs b/crates/l2/based/block_fetcher.rs index 26d78a5cb39..e6b1f137ec5 100644 --- a/crates/l2/based/block_fetcher.rs +++ b/crates/l2/based/block_fetcher.rs @@ -1,5 +1,6 @@ use std::{cmp::min, collections::HashMap, sync::Arc, time::Duration}; +use ethrex_blockchain::error::ChainError; use ethrex_blockchain::{Blockchain, fork_choice::apply_fork_choice, vm::StoreVmDatabase}; use ethrex_common::utils::keccak; use ethrex_common::{ @@ -359,7 +360,11 @@ impl BlockFetcher { // This is copied from the L1Committer, this should be reviewed. 
let mut acc_account_updates: HashMap = HashMap::new(); for block in batch { - let vm_db = StoreVmDatabase::new(self.store.clone(), block.header.parent_hash); + let parent_header = self + .store + .get_block_header_by_hash(block.header.parent_hash)? + .ok_or(BlockFetcherError::ChainError(ChainError::ParentNotFound))?; + let vm_db = StoreVmDatabase::new(self.store.clone(), parent_header); let mut vm = self.blockchain.new_evm(vm_db)?; vm.execute_block(block) .map_err(BlockFetcherError::EvmError)?; @@ -378,8 +383,12 @@ impl BlockFetcher { } let parent_block_hash = first_block.header.parent_hash; + let parent_header = self + .store + .get_block_header_by_hash(parent_block_hash)? + .ok_or(BlockFetcherError::ChainError(ChainError::ParentNotFound))?; - let parent_db = StoreVmDatabase::new(self.store.clone(), parent_block_hash); + let parent_db = StoreVmDatabase::new(self.store.clone(), parent_header); let state_diff = prepare_state_diff( last_block.header.clone(), diff --git a/crates/l2/sequencer/l1_committer.rs b/crates/l2/sequencer/l1_committer.rs index 792a020c871..0cdc5bfe9e7 100644 --- a/crates/l2/sequencer/l1_committer.rs +++ b/crates/l2/sequencer/l1_committer.rs @@ -11,7 +11,7 @@ use crate::{ }; use bytes::Bytes; use ethrex_blockchain::{ - Blockchain, BlockchainOptions, BlockchainType, L2Config, vm::StoreVmDatabase, + Blockchain, BlockchainOptions, BlockchainType, L2Config, error::ChainError, vm::StoreVmDatabase, }; use ethrex_common::{ Address, H256, U256, @@ -534,13 +534,14 @@ impl L1Committer { "Could not find execution cache result for block {}, falling back to re-execution", last_added_block_number + 1 ); + let parent_header = self + .store + .get_block_header_by_hash(potential_batch_block.header.parent_hash)? + .ok_or(CommitterError::ChainError(ChainError::ParentNotFound))?; // Here we use the checkpoint store because we need the previous // state available (i.e. not pruned) for re-execution. - let vm_db = StoreVmDatabase::new( - checkpoint_store.clone(), - potential_batch_block.header.parent_hash, - ); + let vm_db = StoreVmDatabase::new(checkpoint_store.clone(), parent_header); let fee_config = self .rollup_store @@ -603,9 +604,14 @@ impl L1Committer { ))? .parent_hash; + let parent_header = self + .store + .get_block_header_by_hash(parent_block_hash)? + .ok_or(CommitterError::ChainError(ChainError::ParentNotFound))?; + // Again, here the VM database should be instantiated from the checkpoint // store to have access to the previous state - let parent_db = StoreVmDatabase::new(checkpoint_store.clone(), parent_block_hash); + let parent_db = StoreVmDatabase::new(checkpoint_store.clone(), parent_header); let acc_privileged_txs_len: u64 = acc_privileged_txs.len().try_into()?; if acc_privileged_txs_len > PRIVILEGED_TX_BUDGET { diff --git a/crates/networking/rpc/eth/account.rs b/crates/networking/rpc/eth/account.rs index bcc6fd1f25c..749ba32f93b 100644 --- a/crates/networking/rpc/eth/account.rs +++ b/crates/networking/rpc/eth/account.rs @@ -136,8 +136,7 @@ impl RpcHandler for GetStorageAtRequest { let storage_value = context .storage - .get_storage_at(block_number, self.address, self.storage_slot) - .await? + .get_storage_at(block_number, self.address, self.storage_slot)? 
.unwrap_or_default(); let storage_value = H256::from_uint(&storage_value); serde_json::to_value(format!("{storage_value:#x}")) diff --git a/crates/networking/rpc/eth/transaction.rs b/crates/networking/rpc/eth/transaction.rs index 8b5e30e540d..de456083e05 100644 --- a/crates/networking/rpc/eth/transaction.rs +++ b/crates/networking/rpc/eth/transaction.rs @@ -347,7 +347,7 @@ impl RpcHandler for CreateAccessListRequest { _ => return Ok(Value::Null), }; - let vm_db = StoreVmDatabase::new(context.storage.clone(), header.hash()); + let vm_db = StoreVmDatabase::new(context.storage.clone(), header.clone()); let mut vm = context.blockchain.new_evm(vm_db)?; // Run transaction and obtain access list @@ -571,7 +571,7 @@ async fn simulate_tx( storage: Store, blockchain: Arc, ) -> Result { - let vm_db = StoreVmDatabase::new(storage, block_header.hash()); + let vm_db = StoreVmDatabase::new(storage, block_header.clone()); let mut vm = blockchain.new_evm(vm_db)?; match vm.simulate_tx_from_generic(transaction, block_header)? { diff --git a/crates/storage/api.rs b/crates/storage/api.rs index 5de722cd3e0..664f72612ae 100644 --- a/crates/storage/api.rs +++ b/crates/storage/api.rs @@ -380,4 +380,8 @@ pub trait StoreEngine: Debug + Send + Sync + RefUnwindSafe { fn generate_flatkeyvalue(&self) -> Result<(), StoreError>; async fn create_checkpoint(&self, path: &Path) -> Result<(), StoreError>; + + fn flatkeyvalue_computed(&self, _account: H256) -> Result { + Ok(false) + } } diff --git a/crates/storage/store.rs b/crates/storage/store.rs index 118d9035953..75c97ce1073 100644 --- a/crates/storage/store.rs +++ b/crates/storage/store.rs @@ -712,27 +712,39 @@ impl Store { self.engine.get_block_by_number(block_number).await } - pub async fn get_storage_at( + pub fn get_storage_at( &self, block_number: BlockNumber, address: Address, storage_key: H256, ) -> Result, StoreError> { - match self.get_canonical_block_hash(block_number).await? { - Some(block_hash) => self.get_storage_at_hash(block_hash, address, storage_key), + match self.get_block_header(block_number)? { + Some(header) => self.get_storage_at_root(header.state_root, address, storage_key), None => Ok(None), } } - pub fn get_storage_at_hash( + pub fn get_storage_at_root( &self, - block_hash: BlockHash, + state_root: H256, address: Address, storage_key: H256, ) -> Result, StoreError> { - let Some(storage_trie) = self.storage_trie(block_hash, address)? else { - return Ok(None); + let hashed_address = hash_address(&address); + let account_hash = H256::from_slice(&hashed_address); + let storage_root = if self.engine.flatkeyvalue_computed(account_hash)? { + // We will use FKVs, we don't need the root + *EMPTY_TRIE_HASH + } else { + let state_trie = self.open_state_trie(state_root)?; + let Some(encoded_account) = state_trie.get(&hashed_address)? else { + return Ok(None); + }; + let account = AccountState::decode(&encoded_account)?; + account.storage_root }; + let storage_trie = self.open_storage_trie(account_hash, storage_root, state_root)?; + let hashed_key = hash_key(&storage_key); storage_trie .get(&hashed_key)? @@ -883,14 +895,12 @@ impl Store { get_account_state_from_trie(&state_trie, address) } - pub fn get_account_state_by_hash( + pub fn get_account_state_by_root( &self, - block_hash: BlockHash, + state_root: H256, address: Address, ) -> Result, StoreError> { - let Some(state_trie) = self.state_trie(block_hash)? 
else { - return Ok(None); - }; + let state_trie = self.open_state_trie(state_root)?; self.get_account_state_from_trie(&state_trie, address) } diff --git a/crates/storage/store_db/rocksdb.rs b/crates/storage/store_db/rocksdb.rs index f3e000cbdc5..faae647d248 100644 --- a/crates/storage/store_db/rocksdb.rs +++ b/crates/storage/store_db/rocksdb.rs @@ -142,6 +142,7 @@ pub struct Store { trie_cache: Arc>>, flatkeyvalue_control_tx: std::sync::mpsc::SyncSender, trie_update_worker_tx: TriedUpdateWorkerTx, + last_computed_flatkeyvalue: Arc>>, } impl Store { @@ -342,11 +343,23 @@ impl Store { let (fkv_tx, fkv_rx) = std::sync::mpsc::sync_channel(0); let (trie_upd_tx, trie_upd_rx) = std::sync::mpsc::sync_channel(0); + let cf_misc = db + .cf_handle(CF_MISC_VALUES) + .ok_or_else(|| StoreError::Custom("column not found".to_string()))?; + let mut last_written = db + .get_cf(&cf_misc, "last_written")? + .unwrap_or_else(|| vec![0u8; 64]); + if last_written == vec![0xff] { + last_written = vec![0xff; 64]; + } + drop(cf_misc); // dropped to remove borrow on db + let store = Self { db: Arc::new(db), trie_cache: Default::default(), flatkeyvalue_control_tx: fkv_tx, trie_update_worker_tx: trie_upd_tx, + last_computed_flatkeyvalue: Arc::new(Mutex::new(last_written)), }; let store_clone = store.clone(); std::thread::spawn(move || { @@ -608,6 +621,10 @@ impl Store { ctr += 1; if ctr > 10_000 { self.db.write(std::mem::take(&mut batch))?; + *self + .last_computed_flatkeyvalue + .lock() + .map_err(|_| StoreError::LockError)? = path.as_ref().to_vec(); } let mut iter_inner = self @@ -627,6 +644,10 @@ impl Store { ctr += 1; if ctr > 10_000 { self.db.write(std::mem::take(&mut batch))?; + *self + .last_computed_flatkeyvalue + .lock() + .map_err(|_| StoreError::LockError)? = key.as_ref().to_vec(); } if let Ok(value) = control_rx.try_recv() { match value { @@ -665,6 +686,10 @@ impl Store { Ok(()) => { batch.put_cf(&cf_misc, "last_written", [0xff]); self.db.write(batch)?; + *self + .last_computed_flatkeyvalue + .lock() + .map_err(|_| StoreError::LockError)? = vec![0xff; 64]; return Ok(()); } }; @@ -748,6 +773,14 @@ impl Store { *trie_cache.lock().map_err(|_| StoreError::LockError)? 
= Arc::new(trie_mut); Ok(()) } + + fn last_written(&self) -> Result, StoreError> { + let last_computed_flatkeyvalue = self + .last_computed_flatkeyvalue + .lock() + .map_err(|_| StoreError::LockError)?; + Ok(last_computed_flatkeyvalue.clone()) + } } #[async_trait::async_trait] @@ -1437,7 +1470,12 @@ impl StoreEngine for Store { state_root: H256, ) -> Result { // FIXME: use a DB snapshot here - let db = Box::new(RocksDBTrieDB::new(self.db.clone(), CF_TRIE_NODES, None)?); + let db = Box::new(RocksDBTrieDB::new( + self.db.clone(), + CF_TRIE_NODES, + None, + self.last_written()?, + )?); let wrap_db = Box::new(TrieWrapper { state_root, inner: self @@ -1453,7 +1491,12 @@ impl StoreEngine for Store { fn open_state_trie(&self, state_root: H256) -> Result { // FIXME: use a DB snapshot here - let db = Box::new(RocksDBTrieDB::new(self.db.clone(), CF_TRIE_NODES, None)?); + let db = Box::new(RocksDBTrieDB::new( + self.db.clone(), + CF_TRIE_NODES, + None, + self.last_written()?, + )?); let wrap_db = Box::new(TrieWrapper { state_root, inner: self @@ -1476,12 +1519,18 @@ impl StoreEngine for Store { self.db.clone(), CF_TRIE_NODES, Some(hashed_address), + self.last_written()?, )?); Ok(Trie::open(db, storage_root)) } fn open_direct_state_trie(&self, state_root: H256) -> Result { - let db = Box::new(RocksDBTrieDB::new(self.db.clone(), CF_TRIE_NODES, None)?); + let db = Box::new(RocksDBTrieDB::new( + self.db.clone(), + CF_TRIE_NODES, + None, + self.last_written()?, + )?); Ok(Trie::open(db, state_root)) } @@ -1490,6 +1539,7 @@ impl StoreEngine for Store { self.db.clone(), CF_TRIE_NODES, None, + self.last_written()?, )?); let wrap_db = Box::new(TrieWrapper { state_root, @@ -1514,6 +1564,7 @@ impl StoreEngine for Store { self.db.clone(), CF_TRIE_NODES, None, + self.last_written()?, )?); let wrap_db = Box::new(TrieWrapper { state_root, @@ -1901,6 +1952,12 @@ impl StoreEngine for Store { Ok(()) } + + fn flatkeyvalue_computed(&self, account: H256) -> Result { + let account_nibbles = Nibbles::from_bytes(account.as_bytes()); + let last_computed_flatkeyvalue = self.last_written()?; + Ok(&last_computed_flatkeyvalue[0..64] > account_nibbles.as_ref()) + } } /// Open column families diff --git a/crates/storage/trie_db/rocksdb.rs b/crates/storage/trie_db/rocksdb.rs index acf852f60b9..629f8044666 100644 --- a/crates/storage/trie_db/rocksdb.rs +++ b/crates/storage/trie_db/rocksdb.rs @@ -4,10 +4,7 @@ use ethrex_trie::{Nibbles, Node, TrieDB, error::TrieError}; use rocksdb::{DBWithThreadMode, MultiThreaded}; use std::sync::Arc; -use crate::{ - store_db::rocksdb::{CF_FLATKEYVALUE, CF_MISC_VALUES}, - trie_db::layering::apply_prefix, -}; +use crate::{store_db::rocksdb::CF_FLATKEYVALUE, trie_db::layering::apply_prefix}; /// RocksDB implementation for the TrieDB trait, with get and put operations. pub struct RocksDBTrieDB { @@ -26,6 +23,7 @@ impl RocksDBTrieDB { db: Arc>, cf_name: &str, address_prefix: Option, + last_written: Vec, ) -> Result { // Verify column family exists if db.cf_handle(cf_name).is_none() { @@ -34,15 +32,7 @@ impl RocksDBTrieDB { cf_name ))); } - let cf_misc = db - .cf_handle(CF_MISC_VALUES) - .ok_or_else(|| TrieError::DbError(anyhow::anyhow!("Column family not found")))?; - let last_computed_flatkeyvalue = db - .get_cf(&cf_misc, "last_written") - .map_err(|e| TrieError::DbError(anyhow::anyhow!("Error reading last_written: {e}")))? 
- .map(|v| Nibbles::from_hex(v.to_vec())) - .unwrap_or_default(); - drop(cf_misc); + let last_computed_flatkeyvalue = Nibbles::from_hex(last_written); Ok(Self { db, @@ -155,18 +145,17 @@ mod tests { db_options.create_missing_column_families(true); let cf_descriptor = ColumnFamilyDescriptor::new("test_cf", Options::default()); - let cf_misc = ColumnFamilyDescriptor::new(CF_MISC_VALUES, Options::default()); let cf_fkv = ColumnFamilyDescriptor::new(CF_FLATKEYVALUE, Options::default()); let db = DBWithThreadMode::::open_cf_descriptors( &db_options, db_path, - vec![cf_descriptor, cf_misc, cf_fkv], + vec![cf_descriptor, cf_fkv], ) .unwrap(); let db = Arc::new(db); // Create TrieDB - let trie_db = RocksDBTrieDB::new(db, "test_cf", None).unwrap(); + let trie_db = RocksDBTrieDB::new(db, "test_cf", None, vec![]).unwrap(); // Test data let node_hash = Nibbles::from_hex(vec![1]); @@ -196,20 +185,19 @@ mod tests { db_options.create_if_missing(true); db_options.create_missing_column_families(true); - let cf_misc = ColumnFamilyDescriptor::new(CF_MISC_VALUES, Options::default()); let cf_descriptor = ColumnFamilyDescriptor::new("test_cf", Options::default()); let cf_fkv = ColumnFamilyDescriptor::new(CF_FLATKEYVALUE, Options::default()); let db = DBWithThreadMode::::open_cf_descriptors( &db_options, db_path, - vec![cf_descriptor, cf_misc, cf_fkv], + vec![cf_descriptor, cf_fkv], ) .unwrap(); let db = Arc::new(db); // Create TrieDB with address prefix let address = H256::from([0xaa; 32]); - let trie_db = RocksDBTrieDB::new(db, "test_cf", Some(address)).unwrap(); + let trie_db = RocksDBTrieDB::new(db, "test_cf", Some(address), vec![]).unwrap(); // Test data let node_hash = Nibbles::from_hex(vec![1]); @@ -235,19 +223,18 @@ mod tests { db_options.create_if_missing(true); db_options.create_missing_column_families(true); - let cf_misc = ColumnFamilyDescriptor::new(CF_MISC_VALUES, Options::default()); let cf_descriptor = ColumnFamilyDescriptor::new("test_cf", Options::default()); let cf_fkv = ColumnFamilyDescriptor::new(CF_FLATKEYVALUE, Options::default()); let db = DBWithThreadMode::::open_cf_descriptors( &db_options, db_path, - vec![cf_descriptor, cf_misc, cf_fkv], + vec![cf_descriptor, cf_fkv], ) .unwrap(); let db = Arc::new(db); // Create TrieDB - let trie_db = RocksDBTrieDB::new(db, "test_cf", None).unwrap(); + let trie_db = RocksDBTrieDB::new(db, "test_cf", None, vec![]).unwrap(); // Test data // NOTE: we don't use the same paths to avoid overwriting in the batch diff --git a/crates/storage/trie_db/rocksdb_locked.rs b/crates/storage/trie_db/rocksdb_locked.rs index 3f7b83f692e..274bcf1b19e 100644 --- a/crates/storage/trie_db/rocksdb_locked.rs +++ b/crates/storage/trie_db/rocksdb_locked.rs @@ -3,10 +3,7 @@ use ethrex_trie::{Nibbles, TrieDB, error::TrieError}; use rocksdb::{DBWithThreadMode, MultiThreaded, SnapshotWithThreadMode}; use std::sync::Arc; -use crate::{ - store_db::rocksdb::{CF_FLATKEYVALUE, CF_MISC_VALUES}, - trie_db::layering::apply_prefix, -}; +use crate::{store_db::rocksdb::CF_FLATKEYVALUE, trie_db::layering::apply_prefix}; /// RocksDB locked implementation for the TrieDB trait, read-only with consistent snapshot. 
pub struct RocksDBLockedTrieDB { @@ -28,6 +25,7 @@ impl RocksDBLockedTrieDB { db: Arc>, cf_name: &str, address_prefix: Option, + last_written: Vec, ) -> Result { // Leak the database reference to get 'static lifetime let db = Box::leak(Box::new(db)); @@ -41,15 +39,7 @@ impl RocksDBLockedTrieDB { TrieError::DbError(anyhow::anyhow!("Column family not found: {}", cf_name)) })?; - let cf_misc = db - .cf_handle(CF_MISC_VALUES) - .ok_or_else(|| TrieError::DbError(anyhow::anyhow!("Column family not found")))?; - let last_computed_flatkeyvalue = db - .get_cf(&cf_misc, "last_written") - .map_err(|e| TrieError::DbError(anyhow::anyhow!("Error reading last_written: {e}")))? - .map(|v| Nibbles::from_hex(v.to_vec())) - .unwrap_or_default(); - drop(cf_misc); + let last_computed_flatkeyvalue = Nibbles::from_hex(last_written); // Create snapshot for consistent reads let snapshot = db.snapshot(); diff --git a/crates/vm/levm/bench/revm_comparison/src/levm_bench.rs b/crates/vm/levm/bench/revm_comparison/src/levm_bench.rs index 8aa1fd71c0e..dda4818b10c 100644 --- a/crates/vm/levm/bench/revm_comparison/src/levm_bench.rs +++ b/crates/vm/levm/bench/revm_comparison/src/levm_bench.rs @@ -1,9 +1,9 @@ use bytes::Bytes; use ethrex_blockchain::vm::StoreVmDatabase; -use ethrex_common::H256; use ethrex_common::{ Address, U256, - types::{Account, Code, EIP1559Transaction, Transaction, TxKind}, + constants::EMPTY_TRIE_HASH, + types::{Account, BlockHeader, Code, EIP1559Transaction, Transaction, TxKind}, }; use ethrex_levm::errors::VMError; use ethrex_levm::{ @@ -53,7 +53,11 @@ pub fn run_with_levm(contract_code: &str, runs: u64, calldata: &str) { fn init_db(bytecode: Bytes) -> GeneralizedDatabase { // The store type for this bench shouldn't matter as all operations use the LEVM cache let in_memory_db = Store::new("", ethrex_storage::EngineType::InMemory).unwrap(); - let store: DynVmDatabase = Box::new(StoreVmDatabase::new(in_memory_db, H256::zero())); + let header = BlockHeader { + state_root: *EMPTY_TRIE_HASH, + ..Default::default() + }; + let store: DynVmDatabase = Box::new(StoreVmDatabase::new(in_memory_db, header)); let mut cache = FxHashMap::default(); cache.insert( diff --git a/crates/vm/levm/runner/src/main.rs b/crates/vm/levm/runner/src/main.rs index ed529d4d9b2..5ffa08a2757 100644 --- a/crates/vm/levm/runner/src/main.rs +++ b/crates/vm/levm/runner/src/main.rs @@ -3,8 +3,9 @@ use clap::Parser; use env_logger::Env; use ethrex_blockchain::vm::StoreVmDatabase; use ethrex_common::{ - Address, H160, H256, U256, - types::{Account, Code, LegacyTransaction, Transaction}, + Address, H160, U256, + constants::EMPTY_TRIE_HASH, + types::{Account, BlockHeader, Code, LegacyTransaction, Transaction}, }; use ethrex_levm::{ EVMConfig, Environment, @@ -142,7 +143,11 @@ fn main() { // DB let initial_state = setup_initial_state(&mut runner_input, bytecode); let in_memory_db = Store::new("", ethrex_storage::EngineType::InMemory).unwrap(); - let store: DynVmDatabase = Box::new(StoreVmDatabase::new(in_memory_db, H256::zero())); + let header = BlockHeader { + state_root: *EMPTY_TRIE_HASH, + ..Default::default() + }; + let store: DynVmDatabase = Box::new(StoreVmDatabase::new(in_memory_db, header)); let mut db = GeneralizedDatabase::new_with_account_state(Arc::new(store), initial_state); // Initialize VM diff --git a/docs/developers/l1/importing-blocks.md b/docs/developers/l1/importing-blocks.md index aed399cddd5..0ec6d3a3b0d 100644 --- a/docs/developers/l1/importing-blocks.md +++ b/docs/developers/l1/importing-blocks.md @@ -96,7 +96,7 @@ As 
mentioned in the previous point, the VM execution doesn't directly mutate the This is a key piece of code in `Blockchain.execute_block`: ```rust -let vm_db = StoreVmDatabase::new(self.storage.clone(), block.header.parent_hash); +let vm_db = StoreVmDatabase::new(self.storage.clone(), parent_header); let mut vm = Evm::new(vm_db); let execution_result = vm.execute_block(block)?; let account_updates = vm.get_state_transitions()?; diff --git a/tooling/ef_tests/blockchain/test_runner.rs b/tooling/ef_tests/blockchain/test_runner.rs index 2aab076982d..d630339c655 100644 --- a/tooling/ef_tests/blockchain/test_runner.rs +++ b/tooling/ef_tests/blockchain/test_runner.rs @@ -358,7 +358,6 @@ async fn check_poststate_against_db(test_key: &str, test: &TestUnit, db: &Store) for (key, value) in expected_account.storage { let db_storage_value = db .get_storage_at(latest_block_number, *addr, key) - .await .expect("Failed to read from DB") .unwrap_or_else(|| { panic!("Storage missing for address {addr} key {key} in DB test:{test_key}") diff --git a/tooling/ef_tests/state/utils.rs b/tooling/ef_tests/state/utils.rs index 08c7ca2c953..45683308c69 100644 --- a/tooling/ef_tests/state/utils.rs +++ b/tooling/ef_tests/state/utils.rs @@ -22,7 +22,7 @@ pub async fn load_initial_state_revm(test: &EFTest) -> (RevmState, H256, Store) let vm_db: DynVmDatabase = Box::new(StoreVmDatabase::new( storage.clone(), - genesis.get_block().hash(), + genesis.get_block().header, )); (revm_state(vm_db), genesis.get_block().hash(), storage) @@ -35,9 +35,7 @@ pub async fn load_initial_state_levm(test: &EFTest) -> GeneralizedDatabase { let mut storage = Store::new("./temp", EngineType::InMemory).expect("Failed to create Store"); storage.add_initial_state(genesis.clone()).await.unwrap(); - let block_hash = genesis.get_block().hash(); - - let store: DynVmDatabase = Box::new(StoreVmDatabase::new(storage, block_hash)); + let store: DynVmDatabase = Box::new(StoreVmDatabase::new(storage, genesis.get_block().header)); GeneralizedDatabase::new(Arc::new(store)) } diff --git a/tooling/ef_tests/state_v2/src/modules/utils.rs b/tooling/ef_tests/state_v2/src/modules/utils.rs index 076ebcda185..1dfa3497bf6 100644 --- a/tooling/ef_tests/state_v2/src/modules/utils.rs +++ b/tooling/ef_tests/state_v2/src/modules/utils.rs @@ -45,7 +45,10 @@ pub async fn load_initial_state( storage.add_initial_state(genesis.clone()).await.unwrap(); let block_hash = genesis.get_block().hash(); - let store: DynVmDatabase = Box::new(StoreVmDatabase::new(storage.clone(), block_hash)); + let store: DynVmDatabase = Box::new(StoreVmDatabase::new( + storage.clone(), + genesis.get_block().header, + )); // We return some values that will be needed to calculate the post execution checks (original storage, genesis and blockhash) (
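
Illustrative note on the recurring pattern in this change: call sites no longer hand `StoreVmDatabase` a parent block hash; they resolve the parent `BlockHeader` up front (surfacing `ParentNotFound` when it is missing) and pass the whole header, so account and storage reads key off `state_root` via `get_account_state_by_root` / `get_storage_at_root` instead of re-deriving the root from the hash on every read. Below is a minimal sketch of that caller-side pattern; the standalone `execute_one` helper and its signature are hypothetical and only for illustration, while the individual calls mirror ones that appear in the diff.

```rust
use ethrex_blockchain::{Blockchain, error::ChainError, vm::StoreVmDatabase};
use ethrex_common::types::Block;
use ethrex_storage::Store;

/// Hypothetical helper (illustration only): resolve the parent header first,
/// then build the VM database from the header rather than from the parent hash.
fn execute_one(blockchain: &Blockchain, store: &Store, block: &Block) -> Result<(), ChainError> {
    let parent_header = store
        .get_block_header_by_hash(block.header.parent_hash)
        .map_err(ChainError::StoreError)?
        .ok_or(ChainError::ParentNotFound)?;

    // `StoreVmDatabase::new` now takes the full `BlockHeader`; it stores both
    // `block_header.hash()` and `block_header.state_root`, so state reads go
    // through the root-based accessors added in crates/storage/store.rs.
    let vm_db = StoreVmDatabase::new(store.clone(), parent_header);
    let mut vm = blockchain.new_evm(vm_db)?;
    let _execution_result = vm.execute_block(block)?;
    Ok(())
}
```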