Merged
1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default.

46 changes: 20 additions & 26 deletions crates/prune/prune/src/segments/static_file/headers.rs
@@ -19,7 +19,8 @@ use std::num::NonZeroUsize;
use tracing::trace;

/// Number of header tables to prune in one step
const HEADER_TABLES_TO_PRUNE: usize = 3;
/// Note: `HeaderTerminalDifficulties` is no longer pruned after Paris/Merge as it's read-only
const HEADER_TABLES_TO_PRUNE: usize = 2;

#[derive(Debug)]
pub struct Headers<N> {
@@ -64,11 +65,12 @@ impl<Provider: StaticFileProviderFactory + DBProvider<Tx: DbTxMut>> Segment<Prov
let range = last_pruned_block.map_or(0, |block| block + 1)..=block_range_end;

let mut headers_cursor = provider.tx_ref().cursor_write::<tables::Headers>()?;
let mut header_tds_cursor =
provider.tx_ref().cursor_write::<tables::HeaderTerminalDifficulties>()?;
let mut canonical_headers_cursor =
provider.tx_ref().cursor_write::<tables::CanonicalHeaders>()?;

// Note: We no longer prune HeaderTerminalDifficulties table after Paris/Merge
// as it's read-only and kept for backward compatibility

let mut limiter = input.limiter.floor_deleted_entries_limit_to_multiple_of(
NonZeroUsize::new(HEADER_TABLES_TO_PRUNE).unwrap(),
);
@@ -77,7 +79,6 @@ impl<Provider: StaticFileProviderFactory + DBProvider<Tx: DbTxMut>> Segment<Prov
provider,
&mut limiter,
headers_cursor.walk_range(range.clone())?,
header_tds_cursor.walk_range(range.clone())?,
canonical_headers_cursor.walk_range(range)?,
);

@@ -102,6 +103,7 @@ impl<Provider: StaticFileProviderFactory + DBProvider<Tx: DbTxMut>> Segment<Prov
})
}
}

type Walker<'a, Provider, T> =
RangeWalker<'a, T, <<Provider as DBProvider>::Tx as DbTxMut>::CursorMut<T>>;

@@ -113,7 +115,6 @@
provider: &'a Provider,
limiter: &'a mut PruneLimiter,
headers_walker: Walker<'a, Provider, tables::Headers>,
header_tds_walker: Walker<'a, Provider, tables::HeaderTerminalDifficulties>,
canonical_headers_walker: Walker<'a, Provider, tables::CanonicalHeaders>,
}

@@ -130,10 +131,9 @@
provider: &'a Provider,
limiter: &'a mut PruneLimiter,
headers_walker: Walker<'a, Provider, tables::Headers>,
header_tds_walker: Walker<'a, Provider, tables::HeaderTerminalDifficulties>,
canonical_headers_walker: Walker<'a, Provider, tables::CanonicalHeaders>,
) -> Self {
Self { provider, limiter, headers_walker, header_tds_walker, canonical_headers_walker }
Self { provider, limiter, headers_walker, canonical_headers_walker }
}
}

@@ -148,7 +148,6 @@
}

let mut pruned_block_headers = None;
let mut pruned_block_td = None;
let mut pruned_block_canonical = None;

if let Err(err) = self.provider.tx_ref().prune_table_with_range_step(
@@ -160,15 +159,6 @@
return Some(Err(err.into()))
}

if let Err(err) = self.provider.tx_ref().prune_table_with_range_step(
&mut self.header_tds_walker,
self.limiter,
&mut |_| false,
&mut |row| pruned_block_td = Some(row.0),
) {
return Some(Err(err.into()))
}

if let Err(err) = self.provider.tx_ref().prune_table_with_range_step(
&mut self.canonical_headers_walker,
self.limiter,
@@ -178,7 +168,7 @@
return Some(Err(err.into()))
}

if ![pruned_block_headers, pruned_block_td, pruned_block_canonical].iter().all_equal() {
if ![pruned_block_headers, pruned_block_canonical].iter().all_equal() {
return Some(Err(PrunerError::InconsistentData(
"All headers-related tables should be pruned up to the same height",
)))
@@ -227,12 +217,13 @@ mod tests {

assert_eq!(db.table::<tables::CanonicalHeaders>().unwrap().len(), headers.len());
assert_eq!(db.table::<tables::Headers>().unwrap().len(), headers.len());
assert_eq!(db.table::<tables::HeaderTerminalDifficulties>().unwrap().len(), headers.len());
// Note: HeaderTerminalDifficulties table is read-only in database after Paris/Merge
// so we don't check its length as it's not being written to

let test_prune = |to_block: BlockNumber, expected_result: (PruneProgress, usize)| {
let segment = super::Headers::new(db.factory.static_file_provider());
let prune_mode = PruneMode::Before(to_block);
let mut limiter = PruneLimiter::default().set_deleted_entries_limit(10);
let mut limiter = PruneLimiter::default().set_deleted_entries_limit(6);
let input = PruneInput {
previous_checkpoint: db
.factory
@@ -291,10 +282,8 @@
db.table::<tables::Headers>().unwrap().len(),
headers.len() - (last_pruned_block_number + 1) as usize
);
assert_eq!(
db.table::<tables::HeaderTerminalDifficulties>().unwrap().len(),
headers.len() - (last_pruned_block_number + 1) as usize
);
// Note: HeaderTerminalDifficulties table is read-only in database after
// Paris/Merge so we don't check its length as it's not being written to
assert_eq!(
db.factory.provider().unwrap().get_prune_checkpoint(PruneSegment::Headers).unwrap(),
Some(PruneCheckpoint {
@@ -305,11 +294,16 @@
);
};

// First test: Prune with limit of 6 entries
// This will prune blocks 0-2 (3 blocks × 2 tables = 6 entries)
test_prune(
3,
(PruneProgress::HasMoreData(PruneInterruptReason::DeletedEntriesLimitReached), 9),
(PruneProgress::HasMoreData(PruneInterruptReason::DeletedEntriesLimitReached), 6),
);
test_prune(3, (PruneProgress::Finished, 3));

// Second test: Prune remaining blocks
// This will prune block 3 (1 block × 2 tables = 2 entries)
test_prune(3, (PruneProgress::Finished, 2));
}

#[test]
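A side note on the test arithmetic above: with `HeaderTerminalDifficulties` out of the prune loop, `HEADER_TABLES_TO_PRUNE` drops to 2 and the limiter floors the deleted-entries limit to a multiple of it, so the two remaining tables always end at the same height. A minimal sketch of that flooring, using a hypothetical free function in place of `PruneLimiter::floor_deleted_entries_limit_to_multiple_of`:

```rust
/// Hypothetical stand-in for PruneLimiter::floor_deleted_entries_limit_to_multiple_of.
fn floor_to_multiple_of(limit: usize, tables: usize) -> usize {
    limit - (limit % tables)
}

fn main() {
    // Two header tables remain in the prune loop: Headers and CanonicalHeaders.
    // A limit of 6 prunes exactly 3 blocks (3 blocks x 2 tables), which is why
    // the first test_prune call expects 6 deleted entries.
    assert_eq!(floor_to_multiple_of(6, 2), 6);
    // An odd limit is floored so neither table can get ahead of the other.
    assert_eq!(floor_to_multiple_of(7, 2), 6);
}
```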
16 changes: 9 additions & 7 deletions crates/stages/stages/src/stages/headers.rs
@@ -336,9 +336,8 @@ where
(input.unwind_to + 1)..,
)?;
provider.tx_ref().unwind_table_by_num::<tables::CanonicalHeaders>(input.unwind_to)?;
provider
.tx_ref()
.unwind_table_by_num::<tables::HeaderTerminalDifficulties>(input.unwind_to)?;
// Note: We no longer unwind HeaderTerminalDifficulties table after Paris/Merge
// as it's read-only and kept for backward compatibility
let unfinalized_headers_unwound =
provider.tx_ref().unwind_table_by_num::<tables::Headers>(input.unwind_to)?;

@@ -565,10 +564,13 @@ mod tests {
.ensure_no_entry_above_by_value::<tables::HeaderNumbers, _>(block, |val| val)?;
self.db.ensure_no_entry_above::<tables::CanonicalHeaders, _>(block, |key| key)?;
self.db.ensure_no_entry_above::<tables::Headers, _>(block, |key| key)?;
self.db.ensure_no_entry_above::<tables::HeaderTerminalDifficulties, _>(
block,
|num| num,
)?;

// Note: We no longer unwind HeaderTerminalDifficulties table after Paris/Merge, so
// we don't need to ensure entry above
// self.db.ensure_no_entry_above::<tables::HeaderTerminalDifficulties, _>(
// block,
// |num| num,
// )?;
Ok(())
}

10 changes: 8 additions & 2 deletions crates/stages/stages/src/test_utils/test_db.rs
@@ -1,5 +1,5 @@
use alloy_primitives::{keccak256, Address, BlockNumber, TxHash, TxNumber, B256, U256};
use reth_chainspec::MAINNET;
use reth_chainspec::{EthereumHardforks, MAINNET};
use reth_db::{
test_utils::{create_test_rw_db, create_test_rw_db_with_path, create_test_static_files_dir},
DatabaseEnv,
@@ -162,8 +162,14 @@ impl TestStageDB {
writer.append_header(header.header(), td, &header.hash())?;
} else {
tx.put::<tables::CanonicalHeaders>(header.number, header.hash())?;
tx.put::<tables::HeaderTerminalDifficulties>(header.number, td.into())?;
// Note: HeaderTerminalDifficulties table is read-only in database after
// Paris/Merge but still written to static files for historical data

[Collaborator comment: for future PRs: these docs are totally meaningless because the code doesn't have any references to HeaderTerminalDifficulties, so after this is merged this note will just be confusing.]
tx.put::<tables::Headers>(header.number, header.header().clone())?;

// Only insert into HeaderTerminalDifficulties for pre-merge blocks
if !MAINNET.is_paris_active_at_block(header.number) {
tx.put::<tables::HeaderTerminalDifficulties>(header.number, td.into())?;
}
}

tx.put::<tables::HeaderNumbers>(header.hash(), header.number)?;
Expand Down
1 change: 1 addition & 0 deletions crates/static-file/static-file/Cargo.toml
Original file line number Diff line number Diff line change
@@ -22,6 +22,7 @@ reth-prune-types.workspace = true
reth-primitives-traits.workspace = true
reth-static-file-types.workspace = true
reth-stages-types.workspace = true
reth-chainspec.workspace = true

alloy-primitives.workspace = true

49 changes: 38 additions & 11 deletions crates/static-file/static-file/src/segments/headers.rs
[Author comment: @mattsse Could you clarify how this approach would work if we attempt to backfill post-merge ranges from the live DB? Wouldn’t we encounter missing HTD values, given we don’t store them anymore post-merge?]

@@ -1,9 +1,12 @@
use crate::segments::Segment;
use alloy_primitives::BlockNumber;
use reth_chainspec::{EthChainSpec, EthereumHardforks};
use reth_codecs::Compact;
use reth_db_api::{cursor::DbCursorRO, table::Value, tables, transaction::DbTx};
use reth_primitives_traits::NodePrimitives;
use reth_provider::{providers::StaticFileWriter, DBProvider, StaticFileProviderFactory};
use reth_provider::{
providers::StaticFileWriter, ChainSpecProvider, DBProvider, StaticFileProviderFactory,
};
use reth_static_file_types::StaticFileSegment;
use reth_storage_errors::provider::ProviderResult;
use std::ops::RangeInclusive;
@@ -15,7 +18,8 @@ pub struct Headers;
impl<Provider> Segment<Provider> for Headers
where
Provider: StaticFileProviderFactory<Primitives: NodePrimitives<BlockHeader: Compact + Value>>
+ DBProvider,
+ DBProvider
+ ChainSpecProvider<ChainSpec: EthereumHardforks>,
{
fn segment(&self) -> StaticFileSegment {
StaticFileSegment::Headers
@@ -38,23 +42,46 @@ where

let mut header_td_cursor =
provider.tx_ref().cursor_read::<tables::HeaderTerminalDifficulties>()?;
let header_td_walker = header_td_cursor.walk_range(block_range.clone())?;

let mut canonical_headers_cursor =
provider.tx_ref().cursor_read::<tables::CanonicalHeaders>()?;
let canonical_headers_walker = canonical_headers_cursor.walk_range(block_range)?;
let canonical_headers_walker = canonical_headers_cursor.walk_range(block_range.clone())?;

for ((header_entry, header_td_entry), canonical_header_entry) in
headers_walker.zip(header_td_walker).zip(canonical_headers_walker)
{
// Get the final Paris difficulty for post-merge blocks
let final_paris_difficulty = provider.chain_spec().final_paris_total_difficulty();

let header_td_walker = header_td_cursor.walk_range(block_range)?;
let mut header_td_iter = header_td_walker.peekable();

for (header_entry, canonical_header_entry) in headers_walker.zip(canonical_headers_walker) {
let (header_block, header) = header_entry?;
let (header_td_block, header_td) = header_td_entry?;
let (canonical_header_block, canonical_header) = canonical_header_entry?;

debug_assert_eq!(header_block, header_td_block);
debug_assert_eq!(header_td_block, canonical_header_block);
debug_assert_eq!(header_block, canonical_header_block);

// For post-merge blocks, use final Paris difficulty
// For pre-merge blocks, get the stored difficulty from the iterator
let total_difficulty = if provider.chain_spec().is_paris_active_at_block(header_block) {
final_paris_difficulty.unwrap_or_default()
} else {
// For pre-merge blocks, we expect an entry in the terminal difficulties table
// Check if we have a matching entry in our iterator
match header_td_iter.peek() {
Some(Ok((td_block, _))) if *td_block == header_block => {
// We have a matching entry, consume it
let (_, header_td) = header_td_iter.next().unwrap()?;
header_td.0
}
_ => {
// No matching entry for this pre-merge block - this shouldn't happen
return Err(reth_storage_errors::provider::ProviderError::HeaderNotFound(
header_block.into(),
));
}
}
};

static_file_writer.append_header(&header, header_td.0, &canonical_header)?;
static_file_writer.append_header(&header, total_difficulty, &canonical_header)?;
}

Ok(())
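The loop above is the heart of this change: `Headers` and `CanonicalHeaders` stay dense (one row per block) and can be zipped directly, while the terminal-difficulty table is now sparse, so its walker is consumed through a peekable iterator only when the block numbers line up; post-merge blocks use `final_paris_total_difficulty()` instead. A self-contained sketch of that lookup pattern, with `u128` standing in for reth's `U256` and a plain block threshold standing in for `is_paris_active_at_block`:

```rust
/// Illustrative only: dense per-block iteration over a sparse TD source.
fn total_difficulties(
    blocks: &[u64],
    sparse_td: &[(u64, u128)], // only pre-merge blocks have entries
    paris_final_td: u128,
    paris_block: u64,
) -> Result<Vec<u128>, String> {
    let mut td_iter = sparse_td.iter().peekable();
    blocks
        .iter()
        .map(|&block| {
            if block >= paris_block {
                // Post-merge: total difficulty is frozen at the final Paris value.
                Ok(paris_final_td)
            } else {
                // Pre-merge: a matching sparse entry must exist; peek, then consume.
                match td_iter.peek() {
                    Some(&&(td_block, td)) if td_block == block => {
                        td_iter.next();
                        Ok(td)
                    }
                    _ => Err(format!("missing TD entry for pre-merge block {block}")),
                }
            }
        })
        .collect()
}

fn main() {
    // Merge at block 2: blocks 0-1 read stored TDs, blocks 2-3 reuse the Paris value.
    let tds = total_difficulties(&[0, 1, 2, 3], &[(0, 10), (1, 25)], 40, 2).unwrap();
    assert_eq!(tds, vec![10, 25, 40, 40]);
}
```

The error arm mirrors the `ProviderError::HeaderNotFound` return in the diff: a pre-merge block without a TD row is treated as inconsistent data rather than silently defaulted.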
6 changes: 4 additions & 2 deletions crates/static-file/static-file/src/static_file_producer.rs
@@ -4,11 +4,12 @@ use crate::{segments, segments::Segment, StaticFileProducerEvent};
use alloy_primitives::BlockNumber;
use parking_lot::Mutex;
use rayon::prelude::*;
use reth_chainspec::EthereumHardforks;
use reth_codecs::Compact;
use reth_db_api::table::Value;
use reth_primitives_traits::NodePrimitives;
use reth_provider::{
providers::StaticFileWriter, BlockReader, ChainStateBlockReader, DBProvider,
providers::StaticFileWriter, BlockReader, ChainSpecProvider, ChainStateBlockReader, DBProvider,
DatabaseProviderFactory, StageCheckpointReader, StaticFileProviderFactory,
};
use reth_prune_types::PruneModes;
@@ -96,7 +97,8 @@ where
Receipt: Value + Compact,
>,
> + StageCheckpointReader
+ BlockReader,
+ BlockReader
+ ChainSpecProvider<ChainSpec: EthereumHardforks>,
>,
{
/// Listen for events on the `static_file_producer`.
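The producer change is purely about generics: the provider bound gains `ChainSpecProvider<ChainSpec: EthereumHardforks>` so the headers segment can query hardfork activation. A toy sketch of the pattern, with simplified stand-in traits rather than the reth definitions:

```rust
// Simplified stand-ins for ChainSpecProvider / EthereumHardforks.
trait ChainSpecProvider {
    fn paris_block(&self) -> Option<u64>;
}
trait DbProvider {
    fn name(&self) -> &'static str;
}

// Widening the bound lets every segment branch on merge status
// without extra plumbing at the call sites.
fn produce_segment<P: DbProvider + ChainSpecProvider>(provider: &P, block: u64) {
    let post_merge = provider.paris_block().is_some_and(|b| block >= b);
    println!("{}: block {block} post-merge: {post_merge}", provider.name());
}

struct Mainnet;
impl ChainSpecProvider for Mainnet {
    fn paris_block(&self) -> Option<u64> {
        Some(15_537_394) // mainnet Paris activation block
    }
}
impl DbProvider for Mainnet {
    fn name(&self) -> &'static str {
        "mainnet"
    }
}

fn main() {
    produce_segment(&Mainnet, 20_000_000); // prints: mainnet: block 20000000 post-merge: true
}
```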
7 changes: 6 additions & 1 deletion crates/storage/db-api/src/tables/mod.rs
@@ -306,7 +306,12 @@ tables! {
type Value = HeaderHash;
}

/// Stores the total difficulty from a block header.
/// Stores the total difficulty from block headers.
///
/// Note: This table is no longer written to after the Paris/Merge transition
/// as total difficulty is no longer used for consensus. Read operations are
/// maintained for backward compatibility. Total difficulty values are still
/// written to static files for historical record.
table HeaderTerminalDifficulties {
type Key = BlockNumber;
type Value = CompactU256;
4 changes: 3 additions & 1 deletion crates/storage/provider/src/providers/database/metrics.rs
@@ -80,7 +80,8 @@ struct DatabaseProviderMetrics {
insert_headers: Histogram,
/// Duration of insert header numbers
insert_header_numbers: Histogram,
/// Duration of insert header TD
/// Duration of insert header TD (deprecated after Paris/Merge)
#[deprecated(note = "No longer used after Paris/Merge")]
insert_header_td: Histogram,
/// Duration of insert block body indices
insert_block_body_indices: Histogram,
@@ -107,6 +108,7 @@ impl DatabaseProviderMetrics {
Action::InsertCanonicalHeaders => self.insert_canonical_headers.record(duration),
Action::InsertHeaders => self.insert_headers.record(duration),
Action::InsertHeaderNumbers => self.insert_header_numbers.record(duration),
#[expect(deprecated)]
[Reviewer comment (Diamond): The #[expect(deprecated)] attribute is incorrect here; it should be #[allow(deprecated)] to properly suppress the deprecation warning. The expect attribute is used for different purposes in Rust (typically with #[expect(clippy::lint_name)] to acknowledge a clippy lint). Suggested change: #[expect(deprecated)] -> #[allow(deprecated)].]

Action::InsertHeaderTerminalDifficulties => self.insert_header_td.record(duration),
Action::InsertBlockBodyIndices => self.insert_block_body_indices.record(duration),
Action::InsertTransactionBlocks => self.insert_tx_blocks.record(duration),
12 changes: 9 additions & 3 deletions crates/storage/provider/src/providers/database/provider.rs
@@ -1016,6 +1016,7 @@ impl<TX: DbTx + 'static, N: NodeTypesForProvider> HeaderProvider for DatabasePro
}
}

// For pre-merge blocks, try to get from static files first, then fall back to database
self.static_file_provider.get_with_static_file_or_database(
StaticFileSegment::Headers,
number,
@@ -2843,8 +2844,12 @@ impl<TX: DbTxMut + DbTx + 'static, N: NodeTypesForProvider + 'static> BlockWrite
self.tx.put::<tables::Headers<HeaderTy<N>>>(block_number, block.header().clone())?;
durations_recorder.record_relative(metrics::Action::InsertHeaders);

self.tx.put::<tables::HeaderTerminalDifficulties>(block_number, ttd.into())?;
durations_recorder.record_relative(metrics::Action::InsertHeaderTerminalDifficulties);
// Only write to HeaderTerminalDifficulties for pre-merge blocks
if !self.chain_spec.is_paris_active_at_block(block_number) {
self.tx.put::<tables::HeaderTerminalDifficulties>(block_number, ttd.into())?;
durations_recorder
.record_relative(metrics::Action::InsertHeaderTerminalDifficulties);
}
}

if write_to.static_files() {
@@ -2983,7 +2988,8 @@ impl<TX: DbTxMut + DbTx + 'static, N: NodeTypesForProvider + 'static> BlockWrite
// this table in `canonical_hashes_range`.
self.remove::<tables::CanonicalHeaders>(block + 1..)?;
self.remove::<tables::Headers<HeaderTy<N>>>(block + 1..)?;
self.remove::<tables::HeaderTerminalDifficulties>(block + 1..)?;
// Note: HeaderTerminalDifficulties table is read-only in the database after
// Paris/Merge, so we do not remove entries from it here.

// First transaction to be removed
let unwind_tx_from = self
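The test harness and the provider write path now share the same gate: TD rows are written to the database only while Paris is not yet active at the block, and the unwind path leaves the table untouched. A hedged sketch of that predicate, with a hypothetical helper in place of `ChainSpec::is_paris_active_at_block`:

```rust
/// Hypothetical stand-in for the chain-spec query used in the diff.
fn is_paris_active_at_block(paris_activation: Option<u64>, block: u64) -> bool {
    paris_activation.is_some_and(|paris| block >= paris)
}

/// Mirrors the write-path gate: persist a TD row only for pre-merge blocks.
fn should_write_td(paris_activation: Option<u64>, block: u64) -> bool {
    !is_paris_active_at_block(paris_activation, block)
}

fn main() {
    let paris = Some(15_537_394u64); // mainnet Paris activation block
    assert!(should_write_td(paris, 15_537_393)); // last PoW block: row written
    assert!(!should_write_td(paris, 15_537_394)); // merge block onward: skipped
}
```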