Merged
34 commits
328932d
Add checkpointer
Oct 21, 2025
da7a646
Add L2 block production
Oct 21, 2025
9bd8506
Add a single tx for each produced block
Oct 21, 2025
6aaba68
Add initial checkpoint generation logic
Oct 21, 2025
2c7432d
Add comments
Oct 22, 2025
004632f
Include 7200 blocks batch
Oct 22, 2025
2912e28
Update create_checkpoint
Oct 22, 2025
bb46d61
Update apply_account_updates_from_trie_with_witness
Oct 22, 2025
51582a8
Update generate_witness_for_blocks
Oct 22, 2025
cc50ea5
Update naming
Oct 22, 2025
2c3a1e8
Merge branch 'main' of github.com:lambdaclass/ethrex into checkpointer
Oct 23, 2025
cbc8d53
Use checkpoints on committer
Oct 23, 2025
76abb2f
Update checkpointer lock
Oct 23, 2025
cf1aac5
Fix
Oct 23, 2025
6ecc127
Remove needless apply_forkchoice call over the one_time_checkpoint_store
Oct 23, 2025
930b778
Fix generate_witness_for_blocks
Oct 23, 2025
9c694fb
Cleanup
Oct 23, 2025
aba49f6
Pass checkpoint dir to L1Committer
Oct 23, 2025
067b628
Untrack checkpointer bin
Oct 24, 2025
301daf3
Merge branch 'main' of github.com:lambdaclass/ethrex into checkpointer
Oct 24, 2025
6ae9d1b
Update crates/storage/store.rs
ilitteri Oct 24, 2025
3866ca1
Update crates/l2/sequencer/l1_committer.rs
ilitteri Oct 24, 2025
1663345
cargo fmt
Oct 24, 2025
b757259
Increase fd limit
Oct 24, 2025
f2ea803
Improve error message
Oct 24, 2025
5a5f7d8
Fix
Oct 24, 2025
7098fb0
Add caution comment
Oct 24, 2025
bfcadbe
Improve comment
Oct 24, 2025
14197f1
Fix
Oct 24, 2025
8001fae
Merge branch 'main' of github.com:lambdaclass/ethrex into checkpointer
Oct 24, 2025
934683b
Fix
Oct 24, 2025
8a29d11
Remove apply forkchoice by keeping a hashes map
Oct 27, 2025
a954b36
Fix
Oct 27, 2025
1948a88
Fix
Oct 27, 2025
2 changes: 1 addition & 1 deletion cmd/ethrex/Cargo.toml
@@ -83,7 +83,7 @@ c-kzg = [
"ethrex-crypto/c-kzg",
]
metrics = ["ethrex-blockchain/metrics", "ethrex-l2?/metrics"]
rocksdb = ["ethrex-storage/rocksdb", "ethrex-p2p/rocksdb"]
rocksdb = ["ethrex-storage/rocksdb", "ethrex-p2p/rocksdb", "ethrex-l2?/rocksdb"]
jemalloc = ["dep:tikv-jemallocator"]
jemalloc_profiling = [
"jemalloc",
116 changes: 113 additions & 3 deletions cmd/ethrex/l2/initializers.rs
@@ -7,7 +7,9 @@ use crate::l2::{L2Options, SequencerOptions};
use crate::utils::{
NodeConfigFile, get_client_version, init_datadir, read_jwtsecret_file, store_node_config_file,
};
use ethrex_blockchain::{Blockchain, BlockchainType, L2Config};
use ethrex_blockchain::{Blockchain, BlockchainOptions, BlockchainType, L2Config};
use ethrex_common::fd_limit::raise_fd_limit;
use ethrex_common::types::Genesis;
use ethrex_common::types::fee_config::{FeeConfig, L1FeeConfig, OperatorFeeConfig};
use ethrex_common::{Address, types::DEFAULT_BUILDER_GAS_CEIL};
use ethrex_l2::SequencerConfig;
@@ -18,6 +20,8 @@ use ethrex_p2p::{
sync_manager::SyncManager,
types::{Node, NodeRecord},
};
#[cfg(feature = "rocksdb")]
use ethrex_storage::EngineType;
use ethrex_storage::Store;
use ethrex_storage_rollup::{EngineTypeRollup, StoreRollup};
use secp256k1::SecretKey;
@@ -146,14 +150,19 @@ pub async fn init_l2(
opts: L2Options,
log_filter_handler: Option<reload::Handle<EnvFilter, Registry>>,
) -> eyre::Result<()> {
raise_fd_limit()?;

let datadir = opts.node_opts.datadir.clone();
init_datadir(&opts.node_opts.datadir);
let rollup_store_dir = datadir.join("rollup_store");

// Checkpoints are stored in the main datadir
let checkpoints_dir = datadir.clone();

let network = get_network(&opts.node_opts);

let genesis = network.get_genesis()?;
let store = init_store(&datadir, genesis).await;
let store = init_store(&datadir, genesis.clone()).await;
let rollup_store = init_rollup_store(&rollup_store_dir).await;

let operator_fee_config = get_operator_fee_config(&opts.sequencer_opts).await?;
@@ -180,10 +189,18 @@ pub async fn init_l2(
perf_logs_enabled: true,
};

let blockchain = init_blockchain(store.clone(), blockchain_opts);
let blockchain = init_blockchain(store.clone(), blockchain_opts.clone());

regenerate_head_state(&store, &blockchain).await?;

let (initial_checkpoint_store, initial_checkpoint_blockchain) = initialize_checkpoint(
&store,
&checkpoints_dir.join("initial_checkpoint"),
genesis.clone(),
blockchain_opts,
)
.await?;

let signer = get_signer(&datadir);

let local_p2p_node = get_local_p2p_node(&opts.node_opts, &signer);
@@ -277,6 +294,10 @@ pub async fn init_l2(
"http://{}:{}",
opts.node_opts.http_addr, opts.node_opts.http_port
),
initial_checkpoint_store,
initial_checkpoint_blockchain,
genesis,
checkpoints_dir,
)
.into_future();

@@ -344,3 +365,92 @@ pub async fn get_operator_fee_config(
};
Ok(operator_fee_config)
}

/// Initializes a checkpoint of the given store at the specified path.
///
/// If there's no previous checkpoint, it creates one from the current store state.
///
/// This function performs the following steps:
/// 1. Creates a checkpoint of the provided store at the specified path.
/// 2. Initializes a new store and blockchain for the checkpoint.
/// 3. Regenerates the head state in the checkpoint store.
/// 4. Validates that the checkpoint store's head block number and latest block match those of the original store.
async fn initialize_checkpoint(
store: &Store,
path: &Path,
genesis: Genesis,
blockchain_opts: BlockchainOptions,
) -> eyre::Result<(Store, Arc<Blockchain>)> {
// If the checkpoint is not present, create it
if !path.exists() {
store.create_checkpoint(path).await?;
}

// We now load the checkpoint, regenerate its state, and validate it.

#[cfg(feature = "rocksdb")]
let engine_type = EngineType::RocksDB;
#[cfg(not(feature = "rocksdb"))]
let engine_type = EngineType::InMemory;

let checkpoint_store = {
let checkpoint_store_inner = Store::new(path, engine_type)?;

checkpoint_store_inner
.add_initial_state(genesis.clone())
.await?;

checkpoint_store_inner
};

let checkpoint_blockchain =
Arc::new(Blockchain::new(checkpoint_store.clone(), blockchain_opts));

let checkpoint_head_block_number = checkpoint_store.get_latest_block_number().await?;

let db_head_block_number = store.get_latest_block_number().await?;

if checkpoint_head_block_number != db_head_block_number {
return Err(eyre::eyre!(
"checkpoint store head block number does not match main store head block number before regeneration"
));
}

regenerate_head_state(&checkpoint_store, &checkpoint_blockchain).await?;

let checkpoint_latest_block_number = checkpoint_store.get_latest_block_number().await?;

let db_latest_block_number = store.get_latest_block_number().await?;

let checkpoint_latest_block_header = checkpoint_store
.get_block_header(checkpoint_latest_block_number)?
.ok_or(eyre::eyre!(
"latest block header ({checkpoint_latest_block_number}) not found in checkpoint store"
))?;

let db_latest_block_header = store
.get_block_header(db_latest_block_number)?
.ok_or(eyre::eyre!("latest block not found in main store"))?;

// Final sanity check

if !checkpoint_store.has_state_root(checkpoint_latest_block_header.state_root)? {
return Err(eyre::eyre!(
"checkpoint store state is not regenerated properly"
));
}

if checkpoint_latest_block_number != db_head_block_number {
return Err(eyre::eyre!(
"checkpoint store latest block number does not match main store head block number after regeneration"
));
}

if checkpoint_latest_block_header.state_root != db_latest_block_header.state_root {
return Err(eyre::eyre!(
"checkpoint store latest block state root does not match main store latest block state root after regeneration"
));
}

Ok((checkpoint_store, checkpoint_blockchain))
}
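
The function above boils down to one invariant: after regeneration, the checkpoint store must agree with the main store on the latest block number and on that block's state root. Below is a minimal sketch of that check as a standalone helper; it only uses Store methods that already appear in this file (get_latest_block_number, get_block_header, has_state_root), and the helper name checkpoint_matches_main is illustrative rather than part of the codebase.

use ethrex_storage::Store;

// Illustrative sketch (not part of this changeset): mirrors the checks done
// by initialize_checkpoint after regenerating the checkpoint's head state.
async fn checkpoint_matches_main(main: &Store, checkpoint: &Store) -> eyre::Result<bool> {
    // Both stores must report the same head block number...
    let main_head = main.get_latest_block_number().await?;
    let checkpoint_head = checkpoint.get_latest_block_number().await?;
    if main_head != checkpoint_head {
        return Ok(false);
    }

    // ...and the checkpoint must hold the state root of that head block.
    let main_header = main
        .get_block_header(main_head)?
        .ok_or(eyre::eyre!("latest block header not found in main store"))?;

    Ok(checkpoint.has_state_root(main_header.state_root)?)
}
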
106 changes: 91 additions & 15 deletions crates/blockchain/blockchain.rs
@@ -201,13 +201,25 @@ impl Blockchain {
blocks: &[Block],
fee_configs: Option<&[FeeConfig]>,
) -> Result<ExecutionWitness, ChainError> {
let first_block_header = blocks
let first_block_header = &blocks
.first()
.ok_or(ChainError::WitnessGeneration(
"Empty block batch".to_string(),
))?
.header
.clone();
.header;

// Later on, we need to access block hashes by number. To avoid needing
// to apply fork choice for each block, we cache them here.
let mut block_hashes_map: BTreeMap<u64, H256> = blocks
.iter()
.cloned()
.map(|block| (block.header.number, block.hash()))
.collect();

block_hashes_map.insert(
first_block_header.number.saturating_sub(1),
first_block_header.parent_hash,
);

// Get state at previous block
let trie = self
@@ -216,7 +228,17 @@
.map_err(|_| ChainError::ParentStateNotFound)?
.ok_or(ChainError::ParentStateNotFound)?;

let (state_trie_witness, mut trie) = TrieLogger::open_trie(trie);
let (mut current_trie_witness, mut trie) = TrieLogger::open_trie(trie);

// For each block, a new TrieLogger is opened, recording the trie nodes
// accessed during that block's execution. We accumulate all the nodes
// accessed across the entire batch execution here.
let mut accumulated_state_trie_witness = current_trie_witness
.lock()
.map_err(|_| {
ChainError::WitnessGeneration("Failed to lock state trie witness".to_string())
})?
.clone();

let mut touched_account_storage_slots = BTreeMap::new();
// This will become the state trie + storage trie
@@ -232,10 +254,18 @@

for (i, block) in blocks.iter().enumerate() {
let parent_hash = block.header.parent_hash;

// This assumes that the user already has the necessary state stored,
// so if the user only has the state previous to the first block, it
// would fail in the second iteration of this for loop. To ensure this
// doesn't fail, later in this function we store the new state after
// re-execution.
let vm_db: DynVmDatabase =
Box::new(StoreVmDatabase::new(self.storage.clone(), parent_hash));

let logger = Arc::new(DatabaseLogger::new(Arc::new(Mutex::new(Box::new(vm_db)))));
let mut vm = match &self.options.r#type {

let mut vm = match self.options.r#type {
BlockchainType::L1 => Evm::new_from_db_for_l1(logger.clone()),
BlockchainType::L2(_) => {
let l2_config = match fee_configs {
@@ -253,7 +283,8 @@
};

// Re-execute block with logger
vm.execute_block(block)?;
let execution_result = vm.execute_block(block)?;

// Gather account updates
let account_updates = vm.get_state_transitions()?;

@@ -276,7 +307,9 @@
ChainError::WitnessGeneration("Failed to get block hashes".to_string())
})?
.clone();

block_hashes.extend(logger_block_hashes);

// Access all the accounts needed for withdrawals
if let Some(withdrawals) = block.body.withdrawals.as_ref() {
for withdrawal in withdrawals {
@@ -320,6 +353,7 @@
used_storage_tries.insert(*account, (storage_trie_witness, storage_trie));
}
}

// Store all the accessed evm bytecodes
for code_hash in logger
.code_accessed
@@ -342,14 +376,21 @@
}

// Apply account updates to the trie recording all the necessary nodes to do so
let (updated_trie, storage_tries_after_update) = self
let (storage_tries_after_update, account_updates_list) = self
.storage
.apply_account_updates_from_trie_with_witness(
trie,
&account_updates,
used_storage_tries,
)
.await?;

// We cannot ensure that the users of this function have the necessary
// state stored, so to avoid assuming anything, we update the storage
// with the new state after re-execution.
self.store_block(block.clone(), account_updates_list, execution_result)
.await?;

for (address, (witness, _storage_trie)) in storage_tries_after_update {
let mut witness = witness.lock().map_err(|_| {
ChainError::WitnessGeneration("Failed to lock storage trie witness".to_string())
@@ -359,15 +400,40 @@
used_trie_nodes.extend_from_slice(&witness);
touched_account_storage_slots.entry(address).or_default();
}

let (new_state_trie_witness, updated_trie) = TrieLogger::open_trie(
self.storage
.state_trie(
block_hashes_map
.get(&block.header.number)
.ok_or(ChainError::WitnessGeneration(
"Block hash not found for witness generation".to_string(),
))?
.to_owned(),
)
.map_err(|_| ChainError::ParentStateNotFound)?
.ok_or(ChainError::ParentStateNotFound)?,
);

// Use the updated state trie for the next block
trie = updated_trie;

for state_trie_witness in current_trie_witness
.lock()
.map_err(|_| {
ChainError::WitnessGeneration("Failed to lock state trie witness".to_string())
})?
.iter()
{
accumulated_state_trie_witness.insert(state_trie_witness.clone());
}

current_trie_witness = new_state_trie_witness;
}

// Get the witness for the state trie
let mut state_trie_witness = state_trie_witness.lock().map_err(|_| {
ChainError::WitnessGeneration("Failed to lock state trie witness".to_string())
})?;
let state_trie_witness = std::mem::take(&mut *state_trie_witness);
used_trie_nodes.extend_from_slice(&Vec::from_iter(state_trie_witness.into_iter()));
used_trie_nodes
.extend_from_slice(&Vec::from_iter(accumulated_state_trie_witness.into_iter()));

// If the witness is empty at least try to store the root
if used_trie_nodes.is_empty()
&& let Some(root) = root_node
@@ -376,6 +442,7 @@
}

let mut needed_block_numbers = block_hashes.keys().collect::<Vec<_>>();

needed_block_numbers.sort();

// Last needed block header for the witness is the parent of the last block we need to execute
@@ -385,17 +452,26 @@
.header
.number
.saturating_sub(1);

// The first block number we need is either the first block's parent number or the earliest block number used by BLOCKHASH
let mut first_needed_block_number = first_block_header.number.saturating_sub(1);

if let Some(block_number_from_logger) = needed_block_numbers.first()
&& **block_number_from_logger < first_needed_block_number
{
first_needed_block_number = **block_number_from_logger;
}

let mut block_headers_bytes = Vec::new();

for block_number in first_needed_block_number..=last_needed_block_number {
let header = self.storage.get_block_header(block_number)?.ok_or(
ChainError::WitnessGeneration("Failed to get block header".to_string()),
let hash = block_hashes_map
.get(&block_number)
.ok_or(ChainError::WitnessGeneration(format!(
"Failed to get block {block_number} hash"
)))?;
let header = self.storage.get_block_header_by_hash(*hash)?.ok_or(
ChainError::WitnessGeneration(format!("Failed to get block {block_number} header")),
)?;
block_headers_bytes.push(header.encode_to_vec());
}
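
One piece of the witness changes above worth isolating is the block-hash cache: hashes are collected by block number up front, with the first block's parent seeded as well, so later header lookups never need a per-block fork-choice update. The following sketch condenses that step under those assumptions; the import paths for Block and H256 and the helper name build_block_hashes_map are illustrative, not taken from the changeset.

use std::collections::BTreeMap;

use ethrex_common::H256;
use ethrex_common::types::Block;

// Illustrative sketch (not part of this changeset): builds the number -> hash
// map used by generate_witness_for_blocks, seeded with the parent of the
// first block so its hash is also resolvable by number.
fn build_block_hashes_map(blocks: &[Block]) -> BTreeMap<u64, H256> {
    let mut map: BTreeMap<u64, H256> = blocks
        .iter()
        .cloned()
        .map(|block| (block.header.number, block.hash()))
        .collect();

    if let Some(first) = blocks.first() {
        map.insert(
            first.header.number.saturating_sub(1),
            first.header.parent_hash,
        );
    }

    map
}
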
1 change: 1 addition & 0 deletions crates/l2/Cargo.toml
@@ -80,5 +80,6 @@ panic = "deny"

[features]
default = ["l2"]
rocksdb = []
metrics = ["ethrex-blockchain/metrics"]
l2 = ["guest_program/l2"]