diff --git a/bin/reth/src/commands/db/diff.rs b/bin/reth/src/commands/db/diff.rs
index 313db210884..6ead06a66ca 100644
--- a/bin/reth/src/commands/db/diff.rs
+++ b/bin/reth/src/commands/db/diff.rs
@@ -6,11 +6,12 @@ use crate::{
 use clap::Parser;
 use reth_db::{
     cursor::DbCursorRO, database::Database, mdbx::DatabaseArguments, open_db_read_only,
-    table::Table, transaction::DbTx, AccountChangeSet, AccountHistory, AccountsTrie,
+    table::Table, transaction::DbTx, AccountChangeSets, AccountsHistory, AccountsTrie,
     BlockBodyIndices, BlockOmmers, BlockWithdrawals, Bytecodes, CanonicalHeaders, DatabaseEnv,
-    HashedAccount, HashedStorage, HeaderNumbers, HeaderTD, Headers, PlainAccountState,
-    PlainStorageState, PruneCheckpoints, Receipts, StorageChangeSet, StorageHistory, StoragesTrie,
-    SyncStage, SyncStageProgress, Tables, TransactionBlock, Transactions, TxHashNumber, TxSenders,
+    HashedAccounts, HashedStorages, HeaderNumbers, HeaderTerminalDifficulties, Headers,
+    PlainAccountState, PlainStorageState, PruneCheckpoints, Receipts, StageCheckpointProgresses,
+    StageCheckpoints, StorageChangeSets, StoragesHistory, StoragesTrie, Tables, TransactionBlocks,
+    TransactionHashNumbers, TransactionSenders, Transactions,
 };
 use std::{
     collections::HashMap,
@@ -78,7 +79,9 @@ impl Command {
             Tables::CanonicalHeaders => {
                 find_diffs::<CanonicalHeaders>(primary_tx, secondary_tx, output_dir)?
             }
-            Tables::HeaderTD => find_diffs::<HeaderTD>(primary_tx, secondary_tx, output_dir)?,
+            Tables::HeaderTerminalDifficulties => {
+                find_diffs::<HeaderTerminalDifficulties>(primary_tx, secondary_tx, output_dir)?
+            }
             Tables::HeaderNumbers => {
                 find_diffs::<HeaderNumbers>(primary_tx, secondary_tx, output_dir)?
             }
@@ -92,14 +95,14 @@ impl Command {
             Tables::BlockWithdrawals => {
                 find_diffs::<BlockWithdrawals>(primary_tx, secondary_tx, output_dir)?
             }
-            Tables::TransactionBlock => {
-                find_diffs::<TransactionBlock>(primary_tx, secondary_tx, output_dir)?
+            Tables::TransactionBlocks => {
+                find_diffs::<TransactionBlocks>(primary_tx, secondary_tx, output_dir)?
             }
             Tables::Transactions => {
                 find_diffs::<Transactions>(primary_tx, secondary_tx, output_dir)?
             }
-            Tables::TxHashNumber => {
-                find_diffs::<TxHashNumber>(primary_tx, secondary_tx, output_dir)?
+            Tables::TransactionHashNumbers => {
+                find_diffs::<TransactionHashNumbers>(primary_tx, secondary_tx, output_dir)?
             }
             Tables::Receipts => find_diffs::<Receipts>(primary_tx, secondary_tx, output_dir)?,
             Tables::PlainAccountState => {
@@ -109,23 +112,23 @@ impl Command {
                 find_diffs::<PlainStorageState>(primary_tx, secondary_tx, output_dir)?
             }
             Tables::Bytecodes => find_diffs::<Bytecodes>(primary_tx, secondary_tx, output_dir)?,
-            Tables::AccountHistory => {
-                find_diffs::<AccountHistory>(primary_tx, secondary_tx, output_dir)?
+            Tables::AccountsHistory => {
+                find_diffs::<AccountsHistory>(primary_tx, secondary_tx, output_dir)?
             }
-            Tables::StorageHistory => {
-                find_diffs::<StorageHistory>(primary_tx, secondary_tx, output_dir)?
+            Tables::StoragesHistory => {
+                find_diffs::<StoragesHistory>(primary_tx, secondary_tx, output_dir)?
             }
-            Tables::AccountChangeSet => {
-                find_diffs::<AccountChangeSet>(primary_tx, secondary_tx, output_dir)?
+            Tables::AccountChangeSets => {
+                find_diffs::<AccountChangeSets>(primary_tx, secondary_tx, output_dir)?
             }
-            Tables::StorageChangeSet => {
-                find_diffs::<StorageChangeSet>(primary_tx, secondary_tx, output_dir)?
+            Tables::StorageChangeSets => {
+                find_diffs::<StorageChangeSets>(primary_tx, secondary_tx, output_dir)?
             }
-            Tables::HashedAccount => {
-                find_diffs::<HashedAccount>(primary_tx, secondary_tx, output_dir)?
+            Tables::HashedAccounts => {
+                find_diffs::<HashedAccounts>(primary_tx, secondary_tx, output_dir)?
             }
-            Tables::HashedStorage => {
-                find_diffs::<HashedStorage>(primary_tx, secondary_tx, output_dir)?
+            Tables::HashedStorages => {
+                find_diffs::<HashedStorages>(primary_tx, secondary_tx, output_dir)?
             }
             Tables::AccountsTrie => {
                 find_diffs::<AccountsTrie>(primary_tx, secondary_tx, output_dir)?
@@ -133,10 +136,14 @@ impl Command {
             Tables::StoragesTrie => {
                 find_diffs::<StoragesTrie>(primary_tx, secondary_tx, output_dir)?
             }
-            Tables::TxSenders => find_diffs::<TxSenders>(primary_tx, secondary_tx, output_dir)?,
-            Tables::SyncStage => find_diffs::<SyncStage>(primary_tx, secondary_tx, output_dir)?,
-            Tables::SyncStageProgress => {
-                find_diffs::<SyncStageProgress>(primary_tx, secondary_tx, output_dir)?
+            Tables::TransactionSenders => {
+                find_diffs::<TransactionSenders>(primary_tx, secondary_tx, output_dir)?
+            }
+            Tables::StageCheckpoints => {
+                find_diffs::<StageCheckpoints>(primary_tx, secondary_tx, output_dir)?
+            }
+            Tables::StageCheckpointProgresses => {
+                find_diffs::<StageCheckpointProgresses>(primary_tx, secondary_tx, output_dir)?
             }
             Tables::PruneCheckpoints => {
                 find_diffs::<PruneCheckpoints>(primary_tx, secondary_tx, output_dir)?
diff --git a/bin/reth/src/commands/db/get.rs b/bin/reth/src/commands/db/get.rs
index dc64c08e31d..f39fced266d 100644
--- a/bin/reth/src/commands/db/get.rs
+++ b/bin/reth/src/commands/db/get.rs
@@ -113,7 +113,7 @@ mod tests {
     use clap::{Args, Parser};
     use reth_db::{
         models::{storage_sharded_key::StorageShardedKey, ShardedKey},
-        AccountHistory, HashedAccount, Headers, StorageHistory, SyncStage,
+        AccountsHistory, HashedAccounts, Headers, StageCheckpoints, StoragesHistory,
     };
     use reth_primitives::{Address, B256};
     use std::str::FromStr;
@@ -132,12 +132,12 @@ mod tests {
         let args = CommandParser::<Command>::parse_from([
             "reth",
-            "HashedAccount",
+            "HashedAccounts",
             "0x0ac361fe774b78f8fc4e86c1916930d150865c3fc2e21dca2e58833557608bac",
         ])
         .args;
         assert_eq!(
-            args.table_key::<HashedAccount>().unwrap(),
+            args.table_key::<HashedAccounts>().unwrap(),
             B256::from_str("0x0ac361fe774b78f8fc4e86c1916930d150865c3fc2e21dca2e58833557608bac")
                 .unwrap()
         );
@@ -146,15 +146,16 @@ mod tests {
     #[test]
     fn parse_string_key_args() {
         let args =
-            CommandParser::<Command>::parse_from(["reth", "SyncStage", "MerkleExecution"]).args;
-        assert_eq!(args.table_key::<SyncStage>().unwrap(), "MerkleExecution");
+            CommandParser::<Command>::parse_from(["reth", "StageCheckpoints", "MerkleExecution"])
+                .args;
+        assert_eq!(args.table_key::<StageCheckpoints>().unwrap(), "MerkleExecution");
     }
 
     #[test]
     fn parse_json_key_args() {
-        let args = CommandParser::<Command>::parse_from(["reth", "StorageHistory", r#"{ "address": "0x01957911244e546ce519fbac6f798958fafadb41", "sharded_key": { "key": "0x0000000000000000000000000000000000000000000000000000000000000003", "highest_block_number": 18446744073709551615 } }"#]).args;
+        let args = CommandParser::<Command>::parse_from(["reth", "StoragesHistory", r#"{ "address": "0x01957911244e546ce519fbac6f798958fafadb41", "sharded_key": { "key": "0x0000000000000000000000000000000000000000000000000000000000000003", "highest_block_number": 18446744073709551615 } }"#]).args;
         assert_eq!(
-            args.table_key::<StorageHistory>().unwrap(),
+            args.table_key::<StoragesHistory>().unwrap(),
             StorageShardedKey::new(
                 Address::from_str("0x01957911244e546ce519fbac6f798958fafadb41").unwrap(),
                 B256::from_str(
@@ -168,9 +169,9 @@ mod tests {
     #[test]
     fn parse_json_key_for_account_history() {
-        let args = CommandParser::<Command>::parse_from(["reth", "AccountHistory", r#"{ "key": "0x4448e1273fd5a8bfdb9ed111e96889c960eee145", "highest_block_number": 18446744073709551615 }"#]).args;
+        let args = CommandParser::<Command>::parse_from(["reth", "AccountsHistory", r#"{ "key": "0x4448e1273fd5a8bfdb9ed111e96889c960eee145", "highest_block_number": 18446744073709551615 }"#]).args;
         assert_eq!(
-            args.table_key::<AccountHistory>().unwrap(),
+            args.table_key::<AccountsHistory>().unwrap(),
             ShardedKey::new(
                 Address::from_str("0x4448e1273fd5a8bfdb9ed111e96889c960eee145").unwrap(),
                 18446744073709551615
diff --git a/bin/reth/src/commands/recover/storage_tries.rs b/bin/reth/src/commands/recover/storage_tries.rs
index 37e03743cbb..d0ec0281ba2 100644
--- a/bin/reth/src/commands/recover/storage_tries.rs
+++ b/bin/reth/src/commands/recover/storage_tries.rs
@@ -62,7 +62,7 @@ impl Command {
         let mut deleted_tries = 0;
         let tx_mut = provider.tx_mut();
-        let mut hashed_account_cursor = tx_mut.cursor_read::<tables::HashedAccount>()?;
+        let mut hashed_account_cursor = tx_mut.cursor_read::<tables::HashedAccounts>()?;
         let mut storage_trie_cursor = tx_mut.cursor_dup_read::<tables::StoragesTrie>()?;
         let mut entry = storage_trie_cursor.first()?;
 
diff --git a/bin/reth/src/commands/stage/drop.rs b/bin/reth/src/commands/stage/drop.rs
index 3187932c2a0..6ff767d7b5a 100644
--- a/bin/reth/src/commands/stage/drop.rs
+++ b/bin/reth/src/commands/stage/drop.rs
@@ -66,15 +66,18 @@ impl Command {
                 StageEnum::Bodies => {
                     tx.clear::<tables::BlockBodyIndices>()?;
                     tx.clear::<tables::Transactions>()?;
-                    tx.clear::<tables::TransactionBlock>()?;
+                    tx.clear::<tables::TransactionBlocks>()?;
                     tx.clear::<tables::BlockOmmers>()?;
                     tx.clear::<tables::BlockWithdrawals>()?;
-                    tx.put::<tables::SyncStage>(StageId::Bodies.to_string(), Default::default())?;
+                    tx.put::<tables::StageCheckpoints>(
+                        StageId::Bodies.to_string(),
+                        Default::default(),
+                    )?;
                     insert_genesis_header::<DatabaseEnv>(tx, self.chain)?;
                 }
                 StageEnum::Senders => {
-                    tx.clear::<tables::TxSenders>()?;
-                    tx.put::<tables::SyncStage>(
+                    tx.clear::<tables::TransactionSenders>()?;
+                    tx.put::<tables::StageCheckpoints>(
                         StageId::SenderRecovery.to_string(),
                         Default::default(),
                     )?;
@@ -82,41 +85,41 @@ impl Command {
                 StageEnum::Execution => {
                     tx.clear::<tables::PlainAccountState>()?;
                     tx.clear::<tables::PlainStorageState>()?;
-                    tx.clear::<tables::AccountChangeSet>()?;
-                    tx.clear::<tables::StorageChangeSet>()?;
+                    tx.clear::<tables::AccountChangeSets>()?;
+                    tx.clear::<tables::StorageChangeSets>()?;
                     tx.clear::<tables::Bytecodes>()?;
                     tx.clear::<tables::Receipts>()?;
-                    tx.put::<tables::SyncStage>(
+                    tx.put::<tables::StageCheckpoints>(
                         StageId::Execution.to_string(),
                         Default::default(),
                     )?;
                     insert_genesis_state::<DatabaseEnv>(tx, self.chain.genesis())?;
                 }
                 StageEnum::AccountHashing => {
-                    tx.clear::<tables::HashedAccount>()?;
-                    tx.put::<tables::SyncStage>(
+                    tx.clear::<tables::HashedAccounts>()?;
+                    tx.put::<tables::StageCheckpoints>(
                         StageId::AccountHashing.to_string(),
                         Default::default(),
                     )?;
                 }
                 StageEnum::StorageHashing => {
-                    tx.clear::<tables::HashedStorage>()?;
-                    tx.put::<tables::SyncStage>(
+                    tx.clear::<tables::HashedStorages>()?;
+                    tx.put::<tables::StageCheckpoints>(
                         StageId::StorageHashing.to_string(),
                         Default::default(),
                     )?;
                 }
                 StageEnum::Hashing => {
                     // Clear hashed accounts
-                    tx.clear::<tables::HashedAccount>()?;
-                    tx.put::<tables::SyncStage>(
+                    tx.clear::<tables::HashedAccounts>()?;
+                    tx.put::<tables::StageCheckpoints>(
                         StageId::AccountHashing.to_string(),
                         Default::default(),
                     )?;
 
                     // Clear hashed storages
-                    tx.clear::<tables::HashedStorage>()?;
-                    tx.put::<tables::SyncStage>(
+                    tx.clear::<tables::HashedStorages>()?;
+                    tx.put::<tables::StageCheckpoints>(
                         StageId::StorageHashing.to_string(),
                         Default::default(),
                     )?;
@@ -124,42 +127,42 @@ impl Command {
                 StageEnum::Merkle => {
                     tx.clear::<tables::AccountsTrie>()?;
                     tx.clear::<tables::StoragesTrie>()?;
-                    tx.put::<tables::SyncStage>(
+                    tx.put::<tables::StageCheckpoints>(
                         StageId::MerkleExecute.to_string(),
                         Default::default(),
                     )?;
-                    tx.put::<tables::SyncStage>(
+                    tx.put::<tables::StageCheckpoints>(
                         StageId::MerkleUnwind.to_string(),
                         Default::default(),
                     )?;
-                    tx.delete::<tables::SyncStageProgress>(
+                    tx.delete::<tables::StageCheckpointProgresses>(
                         StageId::MerkleExecute.to_string(),
                         None,
                     )?;
                 }
                 StageEnum::AccountHistory | StageEnum::StorageHistory => {
-                    tx.clear::<tables::AccountHistory>()?;
-                    tx.clear::<tables::StorageHistory>()?;
-                    tx.put::<tables::SyncStage>(
+                    tx.clear::<tables::AccountsHistory>()?;
+                    tx.clear::<tables::StoragesHistory>()?;
+                    tx.put::<tables::StageCheckpoints>(
                         StageId::IndexAccountHistory.to_string(),
                         Default::default(),
                     )?;
-                    tx.put::<tables::SyncStage>(
+                    tx.put::<tables::StageCheckpoints>(
                         StageId::IndexStorageHistory.to_string(),
                         Default::default(),
                     )?;
                 }
                 StageEnum::TotalDifficulty => {
-                    tx.clear::<tables::HeaderTD>()?;
-                    tx.put::<tables::SyncStage>(
+                    tx.clear::<tables::HeaderTerminalDifficulties>()?;
+                    tx.put::<tables::StageCheckpoints>(
                         StageId::TotalDifficulty.to_string(),
                         Default::default(),
                     )?;
                     insert_genesis_header::<DatabaseEnv>(tx, self.chain)?;
                 }
                 StageEnum::TxLookup => {
-                    tx.clear::<tables::TxHashNumber>()?;
-                    tx.put::<tables::SyncStage>(
+                    tx.clear::<tables::TransactionHashNumbers>()?;
+                    tx.put::<tables::StageCheckpoints>(
                         StageId::TransactionLookup.to_string(),
                         Default::default(),
                     )?;
@@ -171,7 +174,7 @@ impl Command {
                 }
             }
 
-            tx.put::<tables::SyncStage>(StageId::Finish.to_string(), Default::default())?;
+            tx.put::<tables::StageCheckpoints>(StageId::Finish.to_string(), Default::default())?;
 
             Ok::<_, eyre::Error>(())
         })??;
diff --git a/bin/reth/src/commands/stage/dump/execution.rs b/bin/reth/src/commands/stage/dump/execution.rs
index b6a2c94cf80..5c357e56ee0 100644
--- a/bin/reth/src/commands/stage/dump/execution.rs
+++ b/bin/reth/src/commands/stage/dump/execution.rs
@@ -46,7 +46,11 @@ fn import_tables_with_range(
         tx.import_table_with_range::<tables::CanonicalHeaders, _>(&db_tool.db.tx()?, Some(from), to)
     })??;
     output_db.update(|tx| {
-        tx.import_table_with_range::<tables::HeaderTD, _>(&db_tool.db.tx()?, Some(from), to)
+        tx.import_table_with_range::<tables::HeaderTerminalDifficulties, _>(
+            &db_tool.db.tx()?,
+            Some(from),
+            to,
+        )
     })??;
     output_db.update(|tx| {
         tx.import_table_with_range::<tables::Headers, _>(&db_tool.db.tx()?, Some(from), to)
     })??;
@@ -81,7 +85,11 @@ fn import_tables_with_range(
     })??;
 
     output_db.update(|tx| {
-        tx.import_table_with_range::<tables::TxSenders, _>(&db_tool.db.tx()?, Some(from_tx), to_tx)
+        tx.import_table_with_range::<tables::TransactionSenders, _>(
+            &db_tool.db.tx()?,
+            Some(from_tx),
+            to_tx,
+        )
     })??;
 
     Ok(())
diff --git a/bin/reth/src/commands/stage/dump/hashing_account.rs b/bin/reth/src/commands/stage/dump/hashing_account.rs
index 7fe723257f6..be0723b495b 100644
--- a/bin/reth/src/commands/stage/dump/hashing_account.rs
+++ b/bin/reth/src/commands/stage/dump/hashing_account.rs
@@ -19,7 +19,11 @@ pub(crate) async fn dump_hashing_account_stage(
     // Import relevant AccountChangeSets
     output_db.update(|tx| {
-        tx.import_table_with_range::<tables::AccountChangeSet, _>(&db_tool.db.tx()?, Some(from), to)
+        tx.import_table_with_range::<tables::AccountChangeSets, _>(
+            &db_tool.db.tx()?,
+            Some(from),
+            to,
+        )
     })??;
 
     unwind_and_copy(db_tool, from, tip_block_number, &output_db)?;
diff --git a/bin/reth/src/commands/stage/dump/hashing_storage.rs b/bin/reth/src/commands/stage/dump/hashing_storage.rs
index 37381807252..c05bc66b260 100644
--- a/bin/reth/src/commands/stage/dump/hashing_storage.rs
+++ b/bin/reth/src/commands/stage/dump/hashing_storage.rs
@@ -51,7 +51,8 @@ fn unwind_and_copy(
     // TODO optimize we can actually just get the entries we need for both these tables
     output_db
         .update(|tx| tx.import_dupsort::<tables::PlainStorageState, _>(&unwind_inner_tx))??;
-    output_db.update(|tx| tx.import_dupsort::<tables::StorageChangeSet, _>(&unwind_inner_tx))??;
+    output_db
+        .update(|tx| tx.import_dupsort::<tables::StorageChangeSets, _>(&unwind_inner_tx))??;
 
     Ok(())
 }
diff --git a/bin/reth/src/commands/stage/dump/merkle.rs b/bin/reth/src/commands/stage/dump/merkle.rs
index f0f1fc233f6..a85303c9d0c 100644
--- a/bin/reth/src/commands/stage/dump/merkle.rs
+++ b/bin/reth/src/commands/stage/dump/merkle.rs
@@ -29,7 +29,11 @@ pub(crate) async fn dump_merkle_stage(
     })??;
 
     output_db.update(|tx| {
-        tx.import_table_with_range::<tables::AccountChangeSet, _>(&db_tool.db.tx()?, Some(from), to)
+        tx.import_table_with_range::<tables::AccountChangeSets, _>(
+            &db_tool.db.tx()?,
+            Some(from),
+            to,
+        )
     })??;
 
     unwind_and_copy(db_tool, (from, to), tip_block_number, &output_db).await?;
@@ -100,10 +104,11 @@ async fn unwind_and_copy(
     let unwind_inner_tx = provider.into_tx();
 
     // TODO optimize we can actually just get the entries we need
-    output_db.update(|tx| tx.import_dupsort::<tables::StorageChangeSet, _>(&unwind_inner_tx))??;
+    output_db
+        .update(|tx| tx.import_dupsort::<tables::StorageChangeSets, _>(&unwind_inner_tx))??;
 
-    output_db.update(|tx| tx.import_table::<tables::HashedAccount, _>(&unwind_inner_tx))??;
-    output_db.update(|tx| tx.import_dupsort::<tables::HashedStorage, _>(&unwind_inner_tx))??;
+    output_db.update(|tx| tx.import_table::<tables::HashedAccounts, _>(&unwind_inner_tx))??;
+    output_db.update(|tx| tx.import_dupsort::<tables::HashedStorages, _>(&unwind_inner_tx))??;
 
     output_db.update(|tx| tx.import_table::<tables::AccountsTrie, _>(&unwind_inner_tx))??;
     output_db.update(|tx| tx.import_dupsort::<tables::StoragesTrie, _>(&unwind_inner_tx))??;
diff --git a/bin/reth/src/commands/test_vectors/tables.rs b/bin/reth/src/commands/test_vectors/tables.rs
index 2fa4f760f89..5679317a9a0 100644
--- a/bin/reth/src/commands/test_vectors/tables.rs
+++ b/bin/reth/src/commands/test_vectors/tables.rs
@@ -58,12 +58,12 @@ pub(crate) fn generate_vectors(mut tables: Vec<String>) -> Result<()> {
     generate!([
         (CanonicalHeaders, PER_TABLE, TABLE),
-        (HeaderTD, PER_TABLE, TABLE),
+
(HeaderTerminalDifficulties, PER_TABLE, TABLE), (HeaderNumbers, PER_TABLE, TABLE), (Headers, PER_TABLE, TABLE), (BlockBodyIndices, PER_TABLE, TABLE), (BlockOmmers, 100, TABLE), - (TxHashNumber, PER_TABLE, TABLE), + (TransactionHashNumbers, PER_TABLE, TABLE), (Transactions, 100, TABLE), (PlainStorageState, PER_TABLE, DUPSORT), (PlainAccountState, PER_TABLE, TABLE) diff --git a/book/cli/reth/db/snapshot.md b/book/cli/reth/db/snapshot.md index 8e4aef50126..a612de81c21 100644 --- a/book/cli/reth/db/snapshot.md +++ b/book/cli/reth/db/snapshot.md @@ -11,44 +11,44 @@ Arguments: Snapshot segments to generate Possible values: - - headers: Snapshot segment responsible for the `CanonicalHeaders`, `Headers`, `HeaderTD` tables + - headers: Snapshot segment responsible for the `CanonicalHeaders`, `Headers`, `HeaderTerminalDifficulties` tables - transactions: Snapshot segment responsible for the `Transactions` table - receipts: Snapshot segment responsible for the `Receipts` table Options: --datadir The path to the data dir for all reth files and subdirectories. - + Defaults to the OS-specific data directory: - + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - Windows: `{FOLDERID_RoamingAppData}/reth/` - macOS: `$HOME/Library/Application Support/reth/` - + [default: default] -f, --from Starting block for the snapshot - + [default: 0] -b, --block-interval Number of blocks in the snapshot - + [default: 500000] --chain The chain this node is running. Possible values are either a built-in chain or the path to a chain specification file. - + Built-in chains: mainnet, sepolia, goerli, holesky, dev - + [default: mainnet] -p, --parallel Sets the number of snapshots built in parallel. Note: Each parallel build is memory-intensive - + [default: 1] --only-stats @@ -62,7 +62,7 @@ Options: -c, --compression Compression algorithms to use - + [default: uncompressed] Possible values: @@ -83,13 +83,13 @@ Options: --instance Add a new instance of a node. - + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. - + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. - + Changes to the following port numbers: - DISCOVERY_PORT: default + `instance` - 1 - AUTH_PORT: default + `instance` * 100 - 100 - HTTP_RPC_PORT: default - `instance` + 1 - WS_RPC_PORT: default + `instance` * 2 - 2 - + [default: 1] -h, --help @@ -98,7 +98,7 @@ Options: Logging: --log.stdout.format The format to use for logs written to stdout - + [default: terminal] Possible values: @@ -108,12 +108,12 @@ Logging: --log.stdout.filter The filter to use for logs written to stdout - + [default: ] --log.file.format The format to use for logs written to the log file - + [default: terminal] Possible values: @@ -123,22 +123,22 @@ Logging: --log.file.filter The filter to use for logs written to the log file - + [default: debug] --log.file.directory The path to put log files in - + [default: /logs] --log.file.max-size The maximum size (in MB) of one log file - + [default: 200] --log.file.max-files The maximum amount of log files that will be stored. 
If set to 0, background file logging is disabled - + [default: 5] --log.journald @@ -146,12 +146,12 @@ Logging: --log.journald.filter The filter to use for logs written to journald - + [default: error] --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - + [default: always] Possible values: @@ -162,7 +162,7 @@ Logging: Display: -v, --verbosity... Set the minimum log level. - + -v Errors -vv Warnings -vvv Info @@ -171,4 +171,4 @@ Display: -q, --quiet Silence all log output -``` \ No newline at end of file +``` diff --git a/book/run/pruning.md b/book/run/pruning.md index acfb93c9cb0..6800b7f5fa1 100644 --- a/book/run/pruning.md +++ b/book/run/pruning.md @@ -2,7 +2,7 @@ > Pruning and full node are new features of Reth, > and we will be happy to hear about your experience using them either -> on [GitHub](https://github.com/paradigmxyz/reth/issues) or in the [Telegram group](https://t.me/paradigm_reth). +> on [GitHub](https://github.com/paradigmxyz/reth/issues) or in the [Telegram group](https://t.me/paradigm_reth). By default, Reth runs as an archive node. Such nodes have all historical blocks and the state at each of these blocks available for querying and tracing. @@ -14,14 +14,15 @@ the steps for running Reth as a full node, what caveats to expect and how to con - Archive node – Reth node that has all historical data from genesis. - Pruned node – Reth node that has its historical data pruned partially or fully through -a [custom configuration](./config.md#the-prune-section). + a [custom configuration](./config.md#the-prune-section). - Full Node – Reth node that has the latest state and historical data for only the last 10064 blocks available -for querying in the same way as an archive node. + for querying in the same way as an archive node. The node type that was chosen when first [running a node](./run-a-node.md) **can not** be changed after the initial sync. Turning Archive into Pruned, or Pruned into Full is not supported. ## Modes + ### Archive Node Default mode, follow the steps from the previous chapter on [how to run on mainnet or official testnets](./mainnet.md). @@ -36,6 +37,7 @@ the previous chapter on [how to run on mainnet or official testnets](./mainnet.m To run Reth as a full node, follow the steps from the previous chapter on [how to run on mainnet or official testnets](./mainnet.md), and add a `--full` flag. For example: + ```bash RUST_LOG=info reth node \ --full \ @@ -61,7 +63,7 @@ Different segments take up different amounts of disk space. If pruned fully, this is the total freed space you'll get, per segment: | Segment | Size | -|--------------------|-------| +| ------------------ | ----- | | Sender Recovery | 75GB | | Transaction Lookup | 150GB | | Receipts | 250GB | @@ -73,6 +75,7 @@ If pruned fully, this is the total freed space you'll get, per segment: Full node occupies at least 950GB. Essentially, the full node is the same as following configuration for the pruned node: + ```toml [prune] block_interval = 5 @@ -91,15 +94,18 @@ storage_history = { distance = 10_064 } ``` Meaning, it prunes: + - Account History and Storage History up to the last 10064 blocks - All of Sender Recovery data. The caveat is that it's pruned gradually after the initial sync -is completed, so the disk space is reclaimed slowly. + is completed, so the disk space is reclaimed slowly. 
- Receipts up to the last 10064 blocks, preserving all receipts with the logs from Beacon Deposit Contract Given the aforementioned segment sizes, we get the following full node size: + ```text -Archive Node - Receipts - AccountHistory - StorageHistory = Full Node +Archive Node - Receipts - AccountsHistory - StoragesHistory = Full Node ``` + ```text 2.14TB - 250GB - 240GB - 700GB = 950GB ``` @@ -108,6 +114,7 @@ Archive Node - Receipts - AccountHistory - StorageHistory = Full Node As it was mentioned in the [pruning configuration chapter](./config.md#the-prune-section), there are several segments which can be pruned independently of each other: + - Sender Recovery - Transaction Lookup - Receipts @@ -121,11 +128,10 @@ become unavailable. The following tables describe RPC methods available in the full node. - #### `debug` namespace | RPC | Note | -|----------------------------|------------------------------------------------------------| +| -------------------------- | ---------------------------------------------------------- | | `debug_getRawBlock` | | | `debug_getRawHeader` | | | `debug_getRawReceipts` | Only for the last 10064 blocks and Beacon Deposit Contract | @@ -137,11 +143,10 @@ The following tables describe RPC methods available in the full node. | `debug_traceCallMany` | Only for the last 10064 blocks | | `debug_traceTransaction` | Only for the last 10064 blocks | - #### `eth` namespace | RPC / Segment | Note | -|-------------------------------------------|------------------------------------------------------------| +| ----------------------------------------- | ---------------------------------------------------------- | | `eth_accounts` | | | `eth_blockNumber` | | | `eth_call` | Only for the last 10064 blocks | @@ -189,7 +194,7 @@ The following tables describe RPC methods available in the full node. #### `net` namespace | RPC / Segment | -|-----------------| +| --------------- | | `net_listening` | | `net_peerCount` | | `net_version` | @@ -197,7 +202,7 @@ The following tables describe RPC methods available in the full node. #### `trace` namespace | RPC / Segment | Note | -|---------------------------------|--------------------------------| +| ------------------------------- | ------------------------------ | | `trace_block` | Only for the last 10064 blocks | | `trace_call` | Only for the last 10064 blocks | | `trace_callMany` | Only for the last 10064 blocks | @@ -210,109 +215,108 @@ The following tables describe RPC methods available in the full node. 
#### `txpool` namespace | RPC / Segment | -|----------------------| +| -------------------- | | `txpool_content` | | `txpool_contentFrom` | | `txpool_inspect` | | `txpool_status` | - ### Pruned Node The following tables describe the requirements for prune segments, per RPC method: + - ✅ – if the segment is pruned, the RPC method still works - ❌ - if the segment is pruned, the RPC method doesn't work anymore #### `debug` namespace | RPC / Segment | Sender Recovery | Transaction Lookup | Receipts | Account History | Storage History | -|----------------------------|-----------------|--------------------|----------|-----------------|-----------------| -| `debug_getRawBlock` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `debug_getRawHeader` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `debug_getRawReceipts` | ✅ | ✅ | ❌ | ✅ | ✅ | -| `debug_getRawTransaction` | ✅ | ❌ | ✅ | ✅ | ✅ | -| `debug_traceBlock` | ✅ | ✅ | ✅ | ❌ | ❌ | -| `debug_traceBlockByHash` | ✅ | ✅ | ✅ | ❌ | ❌ | -| `debug_traceBlockByNumber` | ✅ | ✅ | ✅ | ❌ | ❌ | -| `debug_traceCall` | ✅ | ✅ | ✅ | ❌ | ❌ | -| `debug_traceCallMany` | ✅ | ✅ | ✅ | ❌ | ❌ | -| `debug_traceTransaction` | ✅ | ✅ | ✅ | ❌ | ❌ | - +| -------------------------- | --------------- | ------------------ | -------- | --------------- | --------------- | +| `debug_getRawBlock` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `debug_getRawHeader` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `debug_getRawReceipts` | ✅ | ✅ | ❌ | ✅ | ✅ | +| `debug_getRawTransaction` | ✅ | ❌ | ✅ | ✅ | ✅ | +| `debug_traceBlock` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `debug_traceBlockByHash` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `debug_traceBlockByNumber` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `debug_traceCall` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `debug_traceCallMany` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `debug_traceTransaction` | ✅ | ✅ | ✅ | ❌ | ❌ | #### `eth` namespace | RPC / Segment | Sender Recovery | Transaction Lookup | Receipts | Account History | Storage History | -|-------------------------------------------|-----------------|--------------------|----------|-----------------|-----------------| -| `eth_accounts` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_blockNumber` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_call` | ✅ | ✅ | ✅ | ❌ | ❌ | -| `eth_chainId` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_createAccessList` | ✅ | ✅ | ✅ | ❌ | ❌ | -| `eth_estimateGas` | ✅ | ✅ | ✅ | ❌ | ❌ | -| `eth_feeHistory` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_gasPrice` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_getBalance` | ✅ | ✅ | ✅ | ❌ | ✅ | -| `eth_getBlockByHash` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_getBlockByNumber` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_getBlockReceipts` | ✅ | ✅ | ❌ | ✅ | ✅ | -| `eth_getBlockTransactionCountByHash` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_getBlockTransactionCountByNumber` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_getCode` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_getFilterChanges` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_getFilterLogs` | ✅ | ✅ | ❌ | ✅ | ✅ | -| `eth_getLogs` | ✅ | ✅ | ❌ | ✅ | ✅ | -| `eth_getStorageAt` | ✅ | ✅ | ✅ | ✅ | ❌ | -| `eth_getTransactionByBlockHashAndIndex` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_getTransactionByBlockNumberAndIndex` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_getTransactionByHash` | ✅ | ❌ | ✅ | ✅ | ✅ | -| `eth_getTransactionCount` | ✅ | ✅ | ✅ | ❌ | ✅ | -| `eth_getTransactionReceipt` | ✅ | ❌ | ❌ | ✅ | ✅ | -| `eth_getUncleByBlockHashAndIndex` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_getUncleByBlockNumberAndIndex` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_getUncleCountByBlockHash` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_getUncleCountByBlockNumber` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_maxPriorityFeePerGas` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_mining` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_newBlockFilter` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_newFilter` | ✅ | ✅ | ✅ | ✅ | ✅ | -| 
`eth_newPendingTransactionFilter` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_protocolVersion` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_sendRawTransaction` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_sendTransaction` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_sign` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_signTransaction` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_signTypedData` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_subscribe` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_syncing` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_uninstallFilter` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `eth_unsubscribe` | ✅ | ✅ | ✅ | ✅ | ✅ | +| ----------------------------------------- | --------------- | ------------------ | -------- | --------------- | --------------- | +| `eth_accounts` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_blockNumber` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_call` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `eth_chainId` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_createAccessList` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `eth_estimateGas` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `eth_feeHistory` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_gasPrice` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_getBalance` | ✅ | ✅ | ✅ | ❌ | ✅ | +| `eth_getBlockByHash` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_getBlockByNumber` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_getBlockReceipts` | ✅ | ✅ | ❌ | ✅ | ✅ | +| `eth_getBlockTransactionCountByHash` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_getBlockTransactionCountByNumber` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_getCode` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_getFilterChanges` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_getFilterLogs` | ✅ | ✅ | ❌ | ✅ | ✅ | +| `eth_getLogs` | ✅ | ✅ | ❌ | ✅ | ✅ | +| `eth_getStorageAt` | ✅ | ✅ | ✅ | ✅ | ❌ | +| `eth_getTransactionByBlockHashAndIndex` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_getTransactionByBlockNumberAndIndex` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_getTransactionByHash` | ✅ | ❌ | ✅ | ✅ | ✅ | +| `eth_getTransactionCount` | ✅ | ✅ | ✅ | ❌ | ✅ | +| `eth_getTransactionReceipt` | ✅ | ❌ | ❌ | ✅ | ✅ | +| `eth_getUncleByBlockHashAndIndex` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_getUncleByBlockNumberAndIndex` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_getUncleCountByBlockHash` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_getUncleCountByBlockNumber` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_maxPriorityFeePerGas` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_mining` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_newBlockFilter` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_newFilter` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_newPendingTransactionFilter` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_protocolVersion` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_sendRawTransaction` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_sendTransaction` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_sign` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_signTransaction` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_signTypedData` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_subscribe` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_syncing` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_uninstallFilter` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `eth_unsubscribe` | ✅ | ✅ | ✅ | ✅ | ✅ | #### `net` namespace | RPC / Segment | Sender Recovery | Transaction Lookup | Receipts | Account History | Storage History | -|-----------------|-----------------|--------------------|----------|-----------------|-----------------| -| `net_listening` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `net_peerCount` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `net_version` | ✅ | ✅ | ✅ | ✅ | ✅ | +| --------------- | --------------- | ------------------ | -------- | --------------- | --------------- | +| `net_listening` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `net_peerCount` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `net_version` | ✅ | ✅ | ✅ | ✅ | ✅ | #### `trace` namespace | RPC / Segment | Sender Recovery | Transaction Lookup | Receipts | Account History | Storage History | -|---------------------------------|-----------------|--------------------|----------|-----------------|-----------------| -| `trace_block` | ✅ | ✅ | ✅ | ❌ | ❌ 
| -| `trace_call` | ✅ | ✅ | ✅ | ❌ | ❌ | -| `trace_callMany` | ✅ | ✅ | ✅ | ❌ | ❌ | -| `trace_get` | ✅ | ❌ | ✅ | ❌ | ❌ | -| `trace_rawTransaction` | ✅ | ✅ | ✅ | ❌ | ❌ | -| `trace_replayBlockTransactions` | ✅ | ✅ | ✅ | ❌ | ❌ | -| `trace_replayTransaction` | ✅ | ❌ | ✅ | ❌ | ❌ | -| `trace_transaction` | ✅ | ❌ | ✅ | ❌ | ❌ | +| ------------------------------- | --------------- | ------------------ | -------- | --------------- | --------------- | +| `trace_block` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `trace_call` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `trace_callMany` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `trace_get` | ✅ | ❌ | ✅ | ❌ | ❌ | +| `trace_rawTransaction` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `trace_replayBlockTransactions` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `trace_replayTransaction` | ✅ | ❌ | ✅ | ❌ | ❌ | +| `trace_transaction` | ✅ | ❌ | ✅ | ❌ | ❌ | #### `txpool` namespace | RPC / Segment | Sender Recovery | Transaction Lookup | Receipts | Account History | Storage History | -|----------------------|-----------------|--------------------|----------|-----------------|-----------------| -| `txpool_content` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `txpool_contentFrom` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `txpool_inspect` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `txpool_status` | ✅ | ✅ | ✅ | ✅ | ✅ | +| -------------------- | --------------- | ------------------ | -------- | --------------- | --------------- | +| `txpool_content` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `txpool_contentFrom` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `txpool_inspect` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `txpool_status` | ✅ | ✅ | ✅ | ✅ | ✅ | diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 2d678bc728d..fd90ff4e19d 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -1309,7 +1309,7 @@ mod tests { } provider .tx_ref() - .put::("Finish".to_string(), StageCheckpoint::new(10)) + .put::("Finish".to_string(), StageCheckpoint::new(10)) .unwrap(); provider.commit().unwrap(); } @@ -1423,7 +1423,7 @@ mod tests { .unwrap(); let account = Account { balance: initial_signer_balance, ..Default::default() }; provider_rw.tx_ref().put::(signer, account).unwrap(); - provider_rw.tx_ref().put::(keccak256(signer), account).unwrap(); + provider_rw.tx_ref().put::(keccak256(signer), account).unwrap(); provider_rw.commit().unwrap(); } diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 1cec504b7ce..a7c9e9fdc4b 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -2142,7 +2142,7 @@ mod tests { insert_blocks(env.db.as_ref(), chain_spec.clone(), [&genesis, &block1].into_iter()); env.db .update(|tx| { - tx.put::( + tx.put::( StageId::Finish.to_string(), StageCheckpoint::new(block1.number), ) diff --git a/crates/net/network/src/transactions/fetcher.rs b/crates/net/network/src/transactions/fetcher.rs index 18038405cba..f69d3e73224 100644 --- a/crates/net/network/src/transactions/fetcher.rs +++ b/crates/net/network/src/transactions/fetcher.rs @@ -206,7 +206,7 @@ impl TransactionFetcher { // tx is really big, pack request with single tx if size >= self.info.soft_limit_byte_size_pooled_transactions_response_on_pack_request { - return hashes_from_announcement_iter.collect::(); + return hashes_from_announcement_iter.collect::() } else { acc_size_response = size; } @@ -332,7 +332,7 @@ impl TransactionFetcher { ); max_retried_and_evicted_hashes.push(hash); - continue; + continue } *retries += 1; } diff --git a/crates/node-api/src/engine/traits.rs 
b/crates/node-api/src/engine/traits.rs index bdcc82e5842..e80323dd416 100644 --- a/crates/node-api/src/engine/traits.rs +++ b/crates/node-api/src/engine/traits.rs @@ -159,7 +159,7 @@ impl PayloadAttributes for OptimismPayloadAttributes { if self.gas_limit.is_none() && chain_spec.is_optimism() { return Err(AttributesValidationError::InvalidParams( "MissingGasLimitInPayloadAttributes".to_string().into(), - )); + )) } Ok(()) diff --git a/crates/node-core/src/init.rs b/crates/node-core/src/init.rs index dd0eca9f352..406c701ef4a 100644 --- a/crates/node-core/src/init.rs +++ b/crates/node-core/src/init.rs @@ -85,7 +85,7 @@ pub fn init_genesis( // insert sync stage for stage in StageId::ALL.iter() { - tx.put::(stage.to_string(), Default::default())?; + tx.put::(stage.to_string(), Default::default())?; } tx.commit()?; @@ -215,7 +215,7 @@ pub fn insert_genesis_header( tx.put::(0, block_hash)?; tx.put::(block_hash, 0)?; tx.put::(0, Default::default())?; - tx.put::(0, header.difficulty.into())?; + tx.put::(0, header.difficulty.into())?; tx.put::(0, header)?; Ok(()) @@ -327,7 +327,7 @@ mod tests { let tx = db.tx().expect("failed to init tx"); assert_eq!( - collect_table_entries::, tables::AccountHistory>(&tx) + collect_table_entries::, tables::AccountsHistory>(&tx) .expect("failed to collect"), vec![ (ShardedKey::new(address_with_balance, u64::MAX), IntegerList::new([0]).unwrap()), @@ -336,7 +336,7 @@ mod tests { ); assert_eq!( - collect_table_entries::, tables::StorageHistory>(&tx) + collect_table_entries::, tables::StoragesHistory>(&tx) .expect("failed to collect"), vec![( StorageShardedKey::new(address_with_storage, storage_key, u64::MAX), diff --git a/crates/primitives/src/prune/segment.rs b/crates/primitives/src/prune/segment.rs index 0806ce909ce..964a18e1fda 100644 --- a/crates/primitives/src/prune/segment.rs +++ b/crates/primitives/src/prune/segment.rs @@ -7,19 +7,20 @@ use thiserror::Error; #[main_codec] #[derive(Debug, Display, Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash)] pub enum PruneSegment { - /// Prune segment responsible for the `TxSenders` table. + /// Prune segment responsible for the `TransactionSenders` table. SenderRecovery, - /// Prune segment responsible for the `TxHashNumber` table. + /// Prune segment responsible for the `TransactionHashNumbers` table. TransactionLookup, /// Prune segment responsible for all rows in `Receipts` table. Receipts, /// Prune segment responsible for some rows in `Receipts` table filtered by logs. ContractLogs, - /// Prune segment responsible for the `AccountChangeSet` and `AccountHistory` tables. + /// Prune segment responsible for the `AccountChangeSets` and `AccountsHistory` tables. AccountHistory, - /// Prune segment responsible for the `StorageChangeSet` and `StorageHistory` tables. + /// Prune segment responsible for the `StorageChangeSets` and `StoragesHistory` tables. StorageHistory, - /// Prune segment responsible for the `CanonicalHeaders`, `Headers` and `HeaderTD` tables. + /// Prune segment responsible for the `CanonicalHeaders`, `Headers` and + /// `HeaderTerminalDifficulties` tables. Headers, /// Prune segment responsible for the `Transactions` table. Transactions, diff --git a/crates/primitives/src/snapshot/segment.rs b/crates/primitives/src/snapshot/segment.rs index 931db830ad7..2f5b1442efd 100644 --- a/crates/primitives/src/snapshot/segment.rs +++ b/crates/primitives/src/snapshot/segment.rs @@ -27,7 +27,8 @@ use strum::{AsRefStr, EnumIter, EnumString}; /// Segment of the data that can be snapshotted. 
pub enum SnapshotSegment { #[strum(serialize = "headers")] - /// Snapshot segment responsible for the `CanonicalHeaders`, `Headers`, `HeaderTD` tables. + /// Snapshot segment responsible for the `CanonicalHeaders`, `Headers`, + /// `HeaderTerminalDifficulties` tables. Headers, #[strum(serialize = "transactions")] /// Snapshot segment responsible for the `Transactions` table. diff --git a/crates/primitives/src/storage.rs b/crates/primitives/src/storage.rs index b2f5ab1d9de..2e03424d2b4 100644 --- a/crates/primitives/src/storage.rs +++ b/crates/primitives/src/storage.rs @@ -4,7 +4,7 @@ use serde::{Deserialize, Serialize}; /// Account storage entry. /// -/// `key` is the subkey when used as a value in the `StorageChangeSet` table. +/// `key` is the subkey when used as a value in the `StorageChangeSets` table. #[derive_arbitrary(compact)] #[derive(Debug, Default, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, PartialOrd, Ord)] pub struct StorageEntry { diff --git a/crates/prune/src/segments/account_history.rs b/crates/prune/src/segments/account_history.rs index 592552109c2..cf6b6e6046d 100644 --- a/crates/prune/src/segments/account_history.rs +++ b/crates/prune/src/segments/account_history.rs @@ -46,7 +46,7 @@ impl Segment for AccountHistory { let mut last_changeset_pruned_block = None; let (pruned_changesets, done) = provider - .prune_table_with_range::( + .prune_table_with_range::( range, input.delete_limit / 2, |_| false, @@ -60,7 +60,7 @@ impl Segment for AccountHistory { .map(|block_number| if done { block_number } else { block_number.saturating_sub(1) }) .unwrap_or(range_end); - let (processed, pruned_indices) = prune_history_indices::( + let (processed, pruned_indices) = prune_history_indices::( provider, last_changeset_pruned_block, |a, b| a.key == b.key, @@ -113,7 +113,7 @@ mod tests { db.insert_changesets(changesets.clone(), None).expect("insert changesets"); db.insert_history(changesets.clone(), None).expect("insert history"); - let account_occurrences = db.table::().unwrap().into_iter().fold( + let account_occurrences = db.table::().unwrap().into_iter().fold( BTreeMap::<_, usize>::new(), |mut map, (key, _)| { map.entry(key.key).or_default().add_assign(1); @@ -123,11 +123,11 @@ mod tests { assert!(account_occurrences.into_iter().any(|(_, occurrences)| occurrences > 1)); assert_eq!( - db.table::().unwrap().len(), + db.table::().unwrap().len(), changesets.iter().flatten().count() ); - let original_shards = db.table::().unwrap(); + let original_shards = db.table::().unwrap(); let test_prune = |to_block: BlockNumber, run: usize, expected_result: (bool, usize)| { let prune_mode = PruneMode::Before(to_block); @@ -201,11 +201,11 @@ mod tests { ); assert_eq!( - db.table::().unwrap().len(), + db.table::().unwrap().len(), pruned_changesets.values().flatten().count() ); - let actual_shards = db.table::().unwrap(); + let actual_shards = db.table::().unwrap(); let expected_shards = original_shards .iter() diff --git a/crates/prune/src/segments/headers.rs b/crates/prune/src/segments/headers.rs index f0e1754422d..de6bcf9b187 100644 --- a/crates/prune/src/segments/headers.rs +++ b/crates/prune/src/segments/headers.rs @@ -58,7 +58,11 @@ impl Segment for Headers { delete_limit, )?, self.prune_table::(provider, block_range.clone(), delete_limit)?, - self.prune_table::(provider, block_range, delete_limit)?, + self.prune_table::( + provider, + block_range, + delete_limit, + )?, ]; if !results.iter().map(|(_, _, last_pruned_block)| last_pruned_block).all_equal() { @@ -128,7 +132,7 @@ mod tests { 
assert_eq!(db.table::().unwrap().len(), headers.len()); assert_eq!(db.table::().unwrap().len(), headers.len()); - assert_eq!(db.table::().unwrap().len(), headers.len()); + assert_eq!(db.table::().unwrap().len(), headers.len()); let test_prune = |to_block: BlockNumber, expected_result: (bool, usize)| { let prune_mode = PruneMode::Before(to_block); @@ -181,7 +185,7 @@ mod tests { headers.len() - (last_pruned_block_number + 1) as usize ); assert_eq!( - db.table::().unwrap().len(), + db.table::().unwrap().len(), headers.len() - (last_pruned_block_number + 1) as usize ); assert_eq!( diff --git a/crates/prune/src/segments/sender_recovery.rs b/crates/prune/src/segments/sender_recovery.rs index ec2d189f55c..421ff77173e 100644 --- a/crates/prune/src/segments/sender_recovery.rs +++ b/crates/prune/src/segments/sender_recovery.rs @@ -43,7 +43,7 @@ impl Segment for SenderRecovery { let tx_range_end = *tx_range.end(); let mut last_pruned_transaction = tx_range_end; - let (pruned, done) = provider.prune_table_with_range::( + let (pruned, done) = provider.prune_table_with_range::( tx_range, input.delete_limit, |_| false, @@ -110,7 +110,7 @@ mod tests { ); assert_eq!( db.table::().unwrap().len(), - db.table::().unwrap().len() + db.table::().unwrap().len() ); let test_prune = |to_block: BlockNumber, expected_result: (bool, usize)| { @@ -178,7 +178,7 @@ mod tests { last_pruned_block_number.checked_sub(if result.done { 0 } else { 1 }); assert_eq!( - db.table::().unwrap().len(), + db.table::().unwrap().len(), transaction_senders.len() - (last_pruned_tx_number + 1) ); assert_eq!( diff --git a/crates/prune/src/segments/storage_history.rs b/crates/prune/src/segments/storage_history.rs index 286c5695e6d..7c0da3b8413 100644 --- a/crates/prune/src/segments/storage_history.rs +++ b/crates/prune/src/segments/storage_history.rs @@ -50,7 +50,7 @@ impl Segment for StorageHistory { let mut last_changeset_pruned_block = None; let (pruned_changesets, done) = provider - .prune_table_with_range::( + .prune_table_with_range::( BlockNumberAddress::range(range), input.delete_limit / 2, |_| false, @@ -64,7 +64,7 @@ impl Segment for StorageHistory { .map(|block_number| if done { block_number } else { block_number.saturating_sub(1) }) .unwrap_or(range_end); - let (processed, pruned_indices) = prune_history_indices::( + let (processed, pruned_indices) = prune_history_indices::( provider, last_changeset_pruned_block, |a, b| a.address == b.address && a.sharded_key.key == b.sharded_key.key, @@ -117,7 +117,7 @@ mod tests { db.insert_changesets(changesets.clone(), None).expect("insert changesets"); db.insert_history(changesets.clone(), None).expect("insert history"); - let storage_occurrences = db.table::().unwrap().into_iter().fold( + let storage_occurrences = db.table::().unwrap().into_iter().fold( BTreeMap::<_, usize>::new(), |mut map, (key, _)| { map.entry((key.address, key.sharded_key.key)).or_default().add_assign(1); @@ -127,11 +127,11 @@ mod tests { assert!(storage_occurrences.into_iter().any(|(_, occurrences)| occurrences > 1)); assert_eq!( - db.table::().unwrap().len(), + db.table::().unwrap().len(), changesets.iter().flatten().flat_map(|(_, _, entries)| entries).count() ); - let original_shards = db.table::().unwrap(); + let original_shards = db.table::().unwrap(); let test_prune = |to_block: BlockNumber, run: usize, expected_result: (bool, usize)| { let prune_mode = PruneMode::Before(to_block); @@ -207,11 +207,11 @@ mod tests { ); assert_eq!( - db.table::().unwrap().len(), + db.table::().unwrap().len(), 
pruned_changesets.values().flatten().count() ); - let actual_shards = db.table::().unwrap(); + let actual_shards = db.table::().unwrap(); let expected_shards = original_shards .iter() diff --git a/crates/prune/src/segments/transaction_lookup.rs b/crates/prune/src/segments/transaction_lookup.rs index 342a764a68a..6b1dfc8d2c3 100644 --- a/crates/prune/src/segments/transaction_lookup.rs +++ b/crates/prune/src/segments/transaction_lookup.rs @@ -61,7 +61,7 @@ impl Segment for TransactionLookup { } let mut last_pruned_transaction = None; - let (pruned, _) = provider.prune_table_with_iterator::( + let (pruned, _) = provider.prune_table_with_iterator::( hashes, input.delete_limit, |row| { @@ -129,7 +129,7 @@ mod tests { ); assert_eq!( db.table::().unwrap().len(), - db.table::().unwrap().len() + db.table::().unwrap().len() ); let test_prune = |to_block: BlockNumber, expected_result: (bool, usize)| { @@ -197,7 +197,7 @@ mod tests { last_pruned_block_number.checked_sub(if result.done { 0 } else { 1 }); assert_eq!( - db.table::().unwrap().len(), + db.table::().unwrap().len(), tx_hash_numbers.len() - (last_pruned_tx_number + 1) ); assert_eq!( diff --git a/crates/snapshot/src/segments/headers.rs b/crates/snapshot/src/segments/headers.rs index 0a524e86c3b..feb2b1f2936 100644 --- a/crates/snapshot/src/segments/headers.rs +++ b/crates/snapshot/src/segments/headers.rs @@ -54,7 +54,7 @@ impl Segment for Headers { self.dataset_for_compression::( provider, &range, range_len, )?, - self.dataset_for_compression::( + self.dataset_for_compression::( provider, &range, range_len, )?, self.dataset_for_compression::( @@ -78,7 +78,7 @@ impl Segment for Headers { create_snapshot_T1_T2_T3::< tables::Headers, - tables::HeaderTD, + tables::HeaderTerminalDifficulties, tables::CanonicalHeaders, BlockNumber, SegmentHeader, diff --git a/crates/stages/benches/setup/mod.rs b/crates/stages/benches/setup/mod.rs index e7a1ef4bca4..f5f7e54ed14 100644 --- a/crates/stages/benches/setup/mod.rs +++ b/crates/stages/benches/setup/mod.rs @@ -172,7 +172,7 @@ pub(crate) fn txs_testdata(num_blocks: u64) -> PathBuf { // initialize TD db.commit(|tx| { let (head, _) = tx.cursor_read::()?.first()?.unwrap_or_default(); - Ok(tx.put::(head, U256::from(0).into())?) + Ok(tx.put::(head, U256::from(0).into())?) 
}) .unwrap(); } diff --git a/crates/stages/src/stages/bodies.rs b/crates/stages/src/stages/bodies.rs index cf9b4dc64b6..672af2b49bd 100644 --- a/crates/stages/src/stages/bodies.rs +++ b/crates/stages/src/stages/bodies.rs @@ -35,7 +35,7 @@ use tracing::*; /// - [`BlockOmmers`][reth_db::tables::BlockOmmers] /// - [`BlockBodies`][reth_db::tables::BlockBodyIndices] /// - [`Transactions`][reth_db::tables::Transactions] -/// - [`TransactionBlock`][reth_db::tables::TransactionBlock] +/// - [`TransactionBlocks`][reth_db::tables::TransactionBlocks] /// /// # Genesis /// @@ -110,7 +110,7 @@ impl Stage for BodyStage { let tx = provider.tx_ref(); let mut block_indices_cursor = tx.cursor_write::()?; let mut tx_cursor = tx.cursor_write::()?; - let mut tx_block_cursor = tx.cursor_write::()?; + let mut tx_block_cursor = tx.cursor_write::()?; let mut ommers_cursor = tx.cursor_write::()?; let mut withdrawals_cursor = tx.cursor_write::()?; @@ -197,7 +197,7 @@ impl Stage for BodyStage { let mut ommers_cursor = tx.cursor_write::()?; let mut withdrawals_cursor = tx.cursor_write::()?; // Cursors to unwind transitions - let mut tx_block_cursor = tx.cursor_write::()?; + let mut tx_block_cursor = tx.cursor_write::()?; let mut rev_walker = body_cursor.walk_back(None)?; while let Some((number, block_meta)) = rev_walker.next().transpose()? { @@ -587,7 +587,7 @@ mod tests { })?; if body.tx_count != 0 { - tx.put::( + tx.put::( body.first_tx_num(), progress.number, )?; @@ -633,7 +633,7 @@ mod tests { if let Some(last_tx_id) = self.get_last_tx_id()? { self.db .ensure_no_entry_above::(last_tx_id, |key| key)?; - self.db.ensure_no_entry_above::( + self.db.ensure_no_entry_above::( last_tx_id, |key| key, )?; @@ -669,7 +669,7 @@ mod tests { let mut bodies_cursor = tx.cursor_read::()?; let mut ommers_cursor = tx.cursor_read::()?; let mut transaction_cursor = tx.cursor_read::()?; - let mut tx_block_cursor = tx.cursor_read::()?; + let mut tx_block_cursor = tx.cursor_read::()?; let first_body_key = match bodies_cursor.first()? { Some((key, _)) => key, diff --git a/crates/stages/src/stages/execution.rs b/crates/stages/src/stages/execution.rs index 5de44ab7049..bcac2b3428d 100644 --- a/crates/stages/src/stages/execution.rs +++ b/crates/stages/src/stages/execution.rs @@ -33,7 +33,7 @@ use tracing::*; /// Input tables: /// - [tables::CanonicalHeaders] get next block to execute. /// - [tables::Headers] get for revm environment variables. -/// - [tables::HeaderTD] +/// - [tables::HeaderTerminalDifficulties] /// - [tables::BlockBodyIndices] to get tx number /// - [tables::Transactions] to execute /// @@ -47,13 +47,14 @@ use tracing::*; /// - [tables::PlainAccountState] /// - [tables::PlainStorageState] /// - [tables::Bytecodes] -/// - [tables::AccountChangeSet] -/// - [tables::StorageChangeSet] +/// - [tables::AccountChangeSets] +/// - [tables::StorageChangeSets] /// /// For unwinds we are accessing: /// - [tables::BlockBodyIndices] get tx index to know what needs to be unwinded -/// - [tables::AccountHistory] to remove change set and apply old values to -/// - [tables::PlainAccountState] [tables::StorageHistory] to remove change set and apply old values +/// - [tables::AccountsHistory] to remove change set and apply old values to +/// - [tables::PlainAccountState] [tables::StoragesHistory] to remove change set and apply old +/// values /// to [tables::PlainStorageState] // false positive, we cannot derive it if !DB: Debug. 
#[allow(missing_debug_implementations)] @@ -352,8 +353,8 @@ impl Stage for ExecutionStage { ) -> Result { let tx = provider.tx_ref(); // Acquire changeset cursors - let mut account_changeset = tx.cursor_dup_write::()?; - let mut storage_changeset = tx.cursor_dup_write::()?; + let mut account_changeset = tx.cursor_dup_write::()?; + let mut storage_changeset = tx.cursor_dup_write::()?; let (range, unwind_to, _) = input.unwind_block_range_with_threshold(self.thresholds.max_blocks.unwrap_or(u64::MAX)); @@ -399,7 +400,7 @@ impl Stage for ExecutionStage { } // Discard unwinded changesets - provider.unwind_table_by_num::(unwind_to)?; + provider.unwind_table_by_num::(unwind_to)?; let mut rev_storage_changeset_walker = storage_changeset.walk_back(None)?; while let Some((key, _)) = rev_storage_changeset_walker.next().transpose()? { @@ -940,8 +941,8 @@ mod tests { ); assert!(plain_storage.is_empty()); - let account_changesets = test_db.table::().unwrap(); - let storage_changesets = test_db.table::().unwrap(); + let account_changesets = test_db.table::().unwrap(); + let storage_changesets = test_db.table::().unwrap(); assert_eq!( account_changesets, diff --git a/crates/stages/src/stages/hashing_account.rs b/crates/stages/src/stages/hashing_account.rs index 70b61d58976..7e500dca20f 100644 --- a/crates/stages/src/stages/hashing_account.rs +++ b/crates/stages/src/stages/hashing_account.rs @@ -77,7 +77,7 @@ impl AccountHashingStage { /// at the target block, with `txs_range` transactions in each block. /// /// Proceeds to go to the `BlockTransitionIndex` end, go back `transitions` and change the - /// account state in the `AccountChangeSet` table. + /// account state in the `AccountChangeSets` table. pub fn seed( provider: &DatabaseProviderRW, opts: SeedOpts, @@ -108,7 +108,7 @@ impl AccountHashingStage { } let mut acc_changeset_cursor = - provider.tx_ref().cursor_write::()?; + provider.tx_ref().cursor_write::()?; for (t, (addr, acc)) in (opts.blocks).zip(&accounts) { let Account { nonce, balance, .. } = acc; let prev_acc = Account { @@ -166,7 +166,7 @@ impl Stage for AccountHashingStage { } _ => { // clear table, load all accounts and hash it - tx.clear::()?; + tx.clear::()?; None } @@ -213,7 +213,7 @@ impl Stage for AccountHashingStage { hashed_batch.par_sort_unstable_by(|a, b| a.0.cmp(&b.0)); let mut hashed_account_cursor = - tx.cursor_write::>()?; + tx.cursor_write::>()?; // iterate and put presorted hashed accounts if start_address.is_none() { @@ -291,7 +291,7 @@ fn stage_checkpoint_progress( provider: &DatabaseProviderRW, ) -> Result { Ok(EntitiesCheckpoint { - processed: provider.tx_ref().entries::()? as u64, + processed: provider.tx_ref().entries::()? as u64, total: provider.tx_ref().entries::()? as u64, }) } @@ -400,7 +400,7 @@ mod tests { }) if address == fifth_address && total == runner.db.table::().unwrap().len() as u64 ); - assert_eq!(runner.db.table::().unwrap().len(), 5); + assert_eq!(runner.db.table::().unwrap().len(), 5); // second run, hash next five accounts. 
input.checkpoint = Some(result.unwrap().checkpoint); @@ -427,7 +427,7 @@ mod tests { }) if processed == total && total == runner.db.table::().unwrap().len() as u64 ); - assert_eq!(runner.db.table::().unwrap().len(), 10); + assert_eq!(runner.db.table::().unwrap().len(), 10); // Validate the stage execution assert!(runner.validate_execution(input, result.ok()).is_ok(), "execution validation"); @@ -455,11 +455,11 @@ mod tests { } /// Iterates over PlainAccount table and checks that the accounts match the ones - /// in the HashedAccount table + /// in the HashedAccounts table pub(crate) fn check_hashed_accounts(&self) -> Result<(), TestRunnerError> { self.db.query(|tx| { let mut acc_cursor = tx.cursor_read::()?; - let mut hashed_acc_cursor = tx.cursor_read::()?; + let mut hashed_acc_cursor = tx.cursor_read::()?; while let Some((address, account)) = acc_cursor.next()? { let hashed_addr = keccak256(address); @@ -478,7 +478,7 @@ mod tests { pub(crate) fn check_old_hashed_accounts(&self) -> Result<(), TestRunnerError> { self.db.query(|tx| { let mut acc_cursor = tx.cursor_read::()?; - let mut hashed_acc_cursor = tx.cursor_read::()?; + let mut hashed_acc_cursor = tx.cursor_read::()?; while let Some((address, account)) = acc_cursor.next()? { let Account { nonce, balance, .. } = account; diff --git a/crates/stages/src/stages/hashing_storage.rs b/crates/stages/src/stages/hashing_storage.rs index b4f8f3582f0..f396001c395 100644 --- a/crates/stages/src/stages/hashing_storage.rs +++ b/crates/stages/src/stages/hashing_storage.rs @@ -90,7 +90,7 @@ impl Stage for StorageHashingStage { } _ => { // clear table, load all accounts and hash it - tx.clear::()?; + tx.clear::()?; (None, None) } @@ -152,7 +152,7 @@ impl Stage for StorageHashingStage { // iterate and put presorted hashed slots hashed_batch.into_iter().try_for_each(|((addr, key), value)| { - tx.put::(addr, StorageEntry { key, value }) + tx.put::(addr, StorageEntry { key, value }) })?; if current_key.is_some() { @@ -216,7 +216,7 @@ fn stage_checkpoint_progress( provider: &DatabaseProviderRW, ) -> Result { Ok(EntitiesCheckpoint { - processed: provider.tx_ref().entries::()? as u64, + processed: provider.tx_ref().entries::()? as u64, total: provider.tx_ref().entries::()? as u64, }) } @@ -363,7 +363,7 @@ mod tests { }) if address == progress_address && storage == progress_key && total == runner.db.table::().unwrap().len() as u64 ); - assert_eq!(runner.db.table::().unwrap().len(), 500); + assert_eq!(runner.db.table::().unwrap().len(), 500); // second run with commit threshold of 2 to check if subkey is set. runner.set_commit_threshold(2); @@ -409,7 +409,7 @@ mod tests { }) if address == progress_address && storage == progress_key && total == runner.db.table::().unwrap().len() as u64 ); - assert_eq!(runner.db.table::().unwrap().len(), 502); + assert_eq!(runner.db.table::().unwrap().len(), 502); // third last run, hash rest of storages. 
runner.set_commit_threshold(1000); @@ -442,7 +442,7 @@ mod tests { total == runner.db.table::().unwrap().len() as u64 ); assert_eq!( - runner.db.table::().unwrap().len(), + runner.db.table::().unwrap().len(), runner.db.table::().unwrap().len() ); @@ -501,7 +501,10 @@ mod tests { self.db.commit(|tx| { progress.body.iter().try_for_each( |transaction| -> Result<(), reth_db::DatabaseError> { - tx.put::(transaction.hash(), next_tx_num)?; + tx.put::( + transaction.hash(), + next_tx_num, + )?; tx.put::( next_tx_num, transaction.clone().into(), @@ -594,7 +597,7 @@ mod tests { .query(|tx| { let mut storage_cursor = tx.cursor_dup_read::()?; let mut hashed_storage_cursor = - tx.cursor_dup_read::()?; + tx.cursor_dup_read::()?; let mut expected = 0; @@ -609,7 +612,7 @@ mod tests { ); expected += 1; } - let count = tx.cursor_dup_read::()?.walk(None)?.count(); + let count = tx.cursor_dup_read::()?.walk(None)?.count(); assert_eq!(count, expected); Ok(()) @@ -641,18 +644,18 @@ mod tests { let hashed_entry = StorageEntry { key: keccak256(entry.key), value: entry.value }; if let Some(e) = tx - .cursor_dup_write::()? + .cursor_dup_write::()? .seek_by_key_subkey(hashed_address, hashed_entry.key)? .filter(|e| e.key == hashed_entry.key) { - tx.delete::(hashed_address, Some(e)) + tx.delete::(hashed_address, Some(e)) .expect("failed to delete entry"); } - tx.put::(hashed_address, hashed_entry)?; + tx.put::(hashed_address, hashed_entry)?; } - tx.put::(bn_address, prev_entry)?; + tx.put::(bn_address, prev_entry)?; Ok(()) } @@ -661,7 +664,7 @@ mod tests { let target_block = input.unwind_to; self.db.commit(|tx| { let mut storage_cursor = tx.cursor_dup_write::()?; - let mut changeset_cursor = tx.cursor_dup_read::()?; + let mut changeset_cursor = tx.cursor_dup_read::()?; let mut rev_changeset_walker = changeset_cursor.walk_back(None)?; diff --git a/crates/stages/src/stages/headers.rs b/crates/stages/src/stages/headers.rs index bc0dc05ace5..83a212dcb4d 100644 --- a/crates/stages/src/stages/headers.rs +++ b/crates/stages/src/stages/headers.rs @@ -391,8 +391,12 @@ mod tests { let head = random_header(&mut rng, start, None); self.db.insert_headers(std::iter::once(&head))?; // patch td table for `update_head` call - self.db - .commit(|tx| Ok(tx.put::(head.number, U256::ZERO.into())?))?; + self.db.commit(|tx| { + Ok(tx.put::( + head.number, + U256::ZERO.into(), + )?) + })?; // use previous checkpoint as seed size let end = input.target.unwrap_or_default() + 1; diff --git a/crates/stages/src/stages/index_account_history.rs b/crates/stages/src/stages/index_account_history.rs index 64c482ad1c9..71c9c33e27e 100644 --- a/crates/stages/src/stages/index_account_history.rs +++ b/crates/stages/src/stages/index_account_history.rs @@ -12,7 +12,7 @@ use std::fmt::Debug; /// Stage is indexing history the account changesets generated in /// [`ExecutionStage`][crate::stages::ExecutionStage]. 
For more information -/// on index sharding take a look at [`reth_db::tables::AccountHistory`] +/// on index sharding take a look at [`reth_db::tables::AccountsHistory`] #[derive(Debug)] pub struct IndexAccountHistoryStage { /// Number of blocks after which the control @@ -164,7 +164,7 @@ mod tests { StoredBlockBodyIndices { tx_count: 3, ..Default::default() }, )?; // setup changeset that is going to be applied to history index - tx.put::(block, acc())?; + tx.put::(block, acc())?; } Ok(()) }) @@ -209,14 +209,14 @@ mod tests { run(&db, 3, None); // verify - let table = cast(db.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!(table, BTreeMap::from([(shard(u64::MAX), vec![1, 2, 3])])); // unwind unwind(&db, 3, 0); // verify initial state - let table = db.table::().unwrap(); + let table = db.table::().unwrap(); assert!(table.is_empty()); } @@ -228,7 +228,7 @@ mod tests { // setup partial_setup(&db); db.commit(|tx| { - tx.put::(shard(u64::MAX), list(&[1, 2, 3])).unwrap(); + tx.put::(shard(u64::MAX), list(&[1, 2, 3])).unwrap(); Ok(()) }) .unwrap(); @@ -237,14 +237,14 @@ mod tests { run(&db, 5, Some(3)); // verify - let table = cast(db.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!(table, BTreeMap::from([(shard(u64::MAX), vec![1, 2, 3, 4, 5])])); // unwind unwind(&db, 5, 3); // verify initial state - let table = cast(db.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!(table, BTreeMap::from([(shard(u64::MAX), vec![1, 2, 3])])); } @@ -258,7 +258,7 @@ mod tests { // setup partial_setup(&db); db.commit(|tx| { - tx.put::(shard(u64::MAX), list(&full_list)).unwrap(); + tx.put::(shard(u64::MAX), list(&full_list)).unwrap(); Ok(()) }) .unwrap(); @@ -267,7 +267,7 @@ mod tests { run(&db, LAST_BLOCK_IN_FULL_SHARD + 2, Some(LAST_BLOCK_IN_FULL_SHARD)); // verify - let table = cast(db.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!( table, BTreeMap::from([ @@ -280,7 +280,7 @@ mod tests { unwind(&db, LAST_BLOCK_IN_FULL_SHARD + 2, LAST_BLOCK_IN_FULL_SHARD); // verify initial state - let table = cast(db.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!(table, BTreeMap::from([(shard(u64::MAX), full_list)])); } @@ -293,7 +293,7 @@ mod tests { // setup partial_setup(&db); db.commit(|tx| { - tx.put::(shard(u64::MAX), list(&almost_full_list)).unwrap(); + tx.put::(shard(u64::MAX), list(&almost_full_list)).unwrap(); Ok(()) }) .unwrap(); @@ -304,7 +304,7 @@ mod tests { // verify almost_full_list.push(LAST_BLOCK_IN_FULL_SHARD - 1); almost_full_list.push(LAST_BLOCK_IN_FULL_SHARD); - let table = cast(db.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!(table, BTreeMap::from([(shard(u64::MAX), almost_full_list.clone())])); // unwind @@ -313,7 +313,7 @@ mod tests { // verify initial state almost_full_list.pop(); almost_full_list.pop(); - let table = cast(db.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!(table, BTreeMap::from([(shard(u64::MAX), almost_full_list)])); // verify initial state @@ -328,7 +328,7 @@ mod tests { // setup partial_setup(&db); db.commit(|tx| { - tx.put::(shard(u64::MAX), list(&almost_full_list)).unwrap(); + tx.put::(shard(u64::MAX), list(&almost_full_list)).unwrap(); Ok(()) }) .unwrap(); @@ -338,7 +338,7 @@ mod tests { // verify almost_full_list.push(LAST_BLOCK_IN_FULL_SHARD); - let table = cast(db.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!( table, BTreeMap::from([ @@ -352,7 +352,7 @@ mod 
tests { // verify initial state almost_full_list.pop(); - let table = cast(db.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!(table, BTreeMap::from([(shard(u64::MAX), almost_full_list)])); } @@ -365,9 +365,9 @@ mod tests { // setup partial_setup(&db); db.commit(|tx| { - tx.put::(shard(1), list(&full_list)).unwrap(); - tx.put::(shard(2), list(&full_list)).unwrap(); - tx.put::( + tx.put::(shard(1), list(&full_list)).unwrap(); + tx.put::(shard(2), list(&full_list)).unwrap(); + tx.put::( shard(u64::MAX), list(&[LAST_BLOCK_IN_FULL_SHARD + 1]), ) @@ -379,7 +379,7 @@ mod tests { run(&db, LAST_BLOCK_IN_FULL_SHARD + 2, Some(LAST_BLOCK_IN_FULL_SHARD + 1)); // verify - let table = cast(db.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!( table, BTreeMap::from([ @@ -393,7 +393,7 @@ mod tests { unwind(&db, LAST_BLOCK_IN_FULL_SHARD + 2, LAST_BLOCK_IN_FULL_SHARD + 1); // verify initial state - let table = cast(db.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!( table, BTreeMap::from([ @@ -425,9 +425,9 @@ mod tests { .unwrap(); // setup changeset that are going to be applied to history index - tx.put::(20, acc()).unwrap(); - tx.put::(36, acc()).unwrap(); - tx.put::(100, acc()).unwrap(); + tx.put::(20, acc()).unwrap(); + tx.put::(36, acc()).unwrap(); + tx.put::(100, acc()).unwrap(); Ok(()) }) .unwrap(); @@ -444,14 +444,14 @@ mod tests { provider.commit().unwrap(); // verify - let table = cast(db.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!(table, BTreeMap::from([(shard(u64::MAX), vec![36, 100])])); // unwind unwind(&db, 20000, 0); // verify initial state - let table = db.table::().unwrap(); + let table = db.table::().unwrap(); assert!(table.is_empty()); } @@ -530,7 +530,7 @@ mod tests { let provider = self.db.factory.provider()?; let mut changeset_cursor = - provider.tx_ref().cursor_read::()?; + provider.tx_ref().cursor_read::()?; let account_transitions = changeset_cursor.walk_range(start_block..=end_block)?.try_fold( @@ -571,7 +571,7 @@ mod tests { }; } - let table = cast(self.db.table::().unwrap()); + let table = cast(self.db.table::().unwrap()); assert_eq!(table, result); } Ok(()) @@ -580,7 +580,7 @@ mod tests { impl UnwindStageTestRunner for IndexAccountHistoryTestRunner { fn validate_unwind(&self, _input: UnwindInput) -> Result<(), TestRunnerError> { - let table = self.db.table::().unwrap(); + let table = self.db.table::().unwrap(); assert!(table.is_empty()); Ok(()) } diff --git a/crates/stages/src/stages/index_storage_history.rs b/crates/stages/src/stages/index_storage_history.rs index bc4c1bd2e41..7f2a9d154ff 100644 --- a/crates/stages/src/stages/index_storage_history.rs +++ b/crates/stages/src/stages/index_storage_history.rs @@ -11,7 +11,7 @@ use std::fmt::Debug; /// Stage is indexing history the account changesets generated in /// [`ExecutionStage`][crate::stages::ExecutionStage]. For more information -/// on index sharding take a look at [`reth_db::tables::StorageHistory`]. +/// on index sharding take a look at [`reth_db::tables::StoragesHistory`]. 
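// --- editor's example (not part of the patch) --------------------------------
// A minimal sketch of how a sharded history table such as the renamed
// `StoragesHistory` is typically queried. The helper function and its name are
// hypothetical; the table, `StorageShardedKey`, and the cursor `seek` call are
// the reth_db APIs used elsewhere in this diff.
use reth_db::{
    cursor::DbCursorRO, models::storage_sharded_key::StorageShardedKey, table::Table, tables,
    transaction::DbTx, DatabaseError,
};
use reth_primitives::{Address, BlockNumber, B256};

/// Hypothetical helper: fetch the shard that may cover `block` for one slot.
fn storage_history_shard<TX: DbTx>(
    tx: &TX,
    address: Address,
    storage_key: B256,
    block: BlockNumber,
) -> Result<Option<<tables::StoragesHistory as Table>::Value>, DatabaseError> {
    let mut cursor = tx.cursor_read::<tables::StoragesHistory>()?;
    // Shards are keyed by (address, storage key, highest block number in the
    // shard), so seeking at `block` lands on the first shard able to contain it.
    Ok(cursor
        .seek(StorageShardedKey::new(address, storage_key, block))?
        .filter(|(key, _)| key.address == address && key.sharded_key.key == storage_key)
        .map(|(_, block_list)| block_list))
}
// ------------------------------------------------------------------------------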
#[derive(Debug)] pub struct IndexStorageHistoryStage { /// Number of blocks after which the control @@ -172,7 +172,7 @@ mod tests { StoredBlockBodyIndices { tx_count: 3, ..Default::default() }, )?; // setup changeset that is going to be applied to history index - tx.put::( + tx.put::( block_number_address(block), storage(STORAGE_KEY), )?; @@ -220,14 +220,14 @@ mod tests { run(&db, 3, None); // verify - let table = cast(db.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!(table, BTreeMap::from([(shard(u64::MAX), vec![1, 2, 3])])); // unwind unwind(&db, 5, 0); // verify initial state - let table = db.table::().unwrap(); + let table = db.table::().unwrap(); assert!(table.is_empty()); } @@ -239,7 +239,7 @@ mod tests { // setup partial_setup(&db); db.commit(|tx| { - tx.put::(shard(u64::MAX), list(&[1, 2, 3])).unwrap(); + tx.put::(shard(u64::MAX), list(&[1, 2, 3])).unwrap(); Ok(()) }) .unwrap(); @@ -248,14 +248,14 @@ mod tests { run(&db, 5, Some(3)); // verify - let table = cast(db.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!(table, BTreeMap::from([(shard(u64::MAX), vec![1, 2, 3, 4, 5])])); // unwind unwind(&db, 5, 3); // verify initial state - let table = cast(db.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!(table, BTreeMap::from([(shard(u64::MAX), vec![1, 2, 3])])); } @@ -269,7 +269,7 @@ mod tests { // setup partial_setup(&db); db.commit(|tx| { - tx.put::(shard(u64::MAX), list(&full_list)).unwrap(); + tx.put::(shard(u64::MAX), list(&full_list)).unwrap(); Ok(()) }) .unwrap(); @@ -278,7 +278,7 @@ mod tests { run(&db, LAST_BLOCK_IN_FULL_SHARD + 2, Some(LAST_BLOCK_IN_FULL_SHARD)); // verify - let table = cast(db.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!( table, BTreeMap::from([ @@ -291,7 +291,7 @@ mod tests { unwind(&db, LAST_BLOCK_IN_FULL_SHARD + 2, LAST_BLOCK_IN_FULL_SHARD); // verify initial state - let table = cast(db.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!(table, BTreeMap::from([(shard(u64::MAX), full_list)])); } @@ -304,7 +304,7 @@ mod tests { // setup partial_setup(&db); db.commit(|tx| { - tx.put::(shard(u64::MAX), list(&almost_full_list)).unwrap(); + tx.put::(shard(u64::MAX), list(&almost_full_list)).unwrap(); Ok(()) }) .unwrap(); @@ -315,7 +315,7 @@ mod tests { // verify almost_full_list.push(LAST_BLOCK_IN_FULL_SHARD - 1); almost_full_list.push(LAST_BLOCK_IN_FULL_SHARD); - let table = cast(db.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!(table, BTreeMap::from([(shard(u64::MAX), almost_full_list.clone())])); // unwind @@ -324,7 +324,7 @@ mod tests { // verify initial state almost_full_list.pop(); almost_full_list.pop(); - let table = cast(db.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!(table, BTreeMap::from([(shard(u64::MAX), almost_full_list)])); // verify initial state @@ -339,7 +339,7 @@ mod tests { // setup partial_setup(&db); db.commit(|tx| { - tx.put::(shard(u64::MAX), list(&close_full_list)).unwrap(); + tx.put::(shard(u64::MAX), list(&close_full_list)).unwrap(); Ok(()) }) .unwrap(); @@ -349,7 +349,7 @@ mod tests { // verify close_full_list.push(LAST_BLOCK_IN_FULL_SHARD); - let table = cast(db.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!( table, BTreeMap::from([ @@ -363,7 +363,7 @@ mod tests { // verify initial state close_full_list.pop(); - let table = cast(db.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!(table, 
BTreeMap::from([(shard(u64::MAX), close_full_list)])); } @@ -376,9 +376,9 @@ mod tests { // setup partial_setup(&db); db.commit(|tx| { - tx.put::(shard(1), list(&full_list)).unwrap(); - tx.put::(shard(2), list(&full_list)).unwrap(); - tx.put::( + tx.put::(shard(1), list(&full_list)).unwrap(); + tx.put::(shard(2), list(&full_list)).unwrap(); + tx.put::( shard(u64::MAX), list(&[LAST_BLOCK_IN_FULL_SHARD + 1]), ) @@ -390,7 +390,7 @@ mod tests { run(&db, LAST_BLOCK_IN_FULL_SHARD + 2, Some(LAST_BLOCK_IN_FULL_SHARD + 1)); // verify - let table = cast(db.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!( table, BTreeMap::from([ @@ -404,7 +404,7 @@ mod tests { unwind(&db, LAST_BLOCK_IN_FULL_SHARD + 2, LAST_BLOCK_IN_FULL_SHARD + 1); // verify initial state - let table = cast(db.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!( table, BTreeMap::from([ @@ -436,11 +436,11 @@ mod tests { .unwrap(); // setup changeset that are going to be applied to history index - tx.put::(block_number_address(20), storage(STORAGE_KEY)) + tx.put::(block_number_address(20), storage(STORAGE_KEY)) .unwrap(); - tx.put::(block_number_address(36), storage(STORAGE_KEY)) + tx.put::(block_number_address(36), storage(STORAGE_KEY)) .unwrap(); - tx.put::(block_number_address(100), storage(STORAGE_KEY)) + tx.put::(block_number_address(100), storage(STORAGE_KEY)) .unwrap(); Ok(()) }) @@ -458,14 +458,14 @@ mod tests { provider.commit().unwrap(); // verify - let table = cast(db.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!(table, BTreeMap::from([(shard(u64::MAX), vec![36, 100])])); // unwind unwind(&db, 20000, 0); // verify initial state - let table = db.table::().unwrap(); + let table = db.table::().unwrap(); assert!(table.is_empty()); } @@ -544,7 +544,7 @@ mod tests { let provider = self.db.factory.provider()?; let mut changeset_cursor = - provider.tx_ref().cursor_read::()?; + provider.tx_ref().cursor_read::()?; let storage_transitions = changeset_cursor .walk_range(BlockNumberAddress::range(start_block..=end_block))? @@ -593,7 +593,7 @@ mod tests { }; } - let table = cast(self.db.table::().unwrap()); + let table = cast(self.db.table::().unwrap()); assert_eq!(table, result); } Ok(()) @@ -602,7 +602,7 @@ mod tests { impl UnwindStageTestRunner for IndexStorageHistoryTestRunner { fn validate_unwind(&self, _input: UnwindInput) -> Result<(), TestRunnerError> { - let table = self.db.table::().unwrap(); + let table = self.db.table::().unwrap(); assert!(table.is_empty()); Ok(()) } diff --git a/crates/stages/src/stages/merkle.rs b/crates/stages/src/stages/merkle.rs index 8a7d5883f3d..8866262e737 100644 --- a/crates/stages/src/stages/merkle.rs +++ b/crates/stages/src/stages/merkle.rs @@ -184,8 +184,8 @@ impl Stage for MerkleStage { } .unwrap_or(EntitiesCheckpoint { processed: 0, - total: (provider.tx_ref().entries::()? + - provider.tx_ref().entries::()?) + total: (provider.tx_ref().entries::()? + + provider.tx_ref().entries::()?) as u64, }); @@ -230,8 +230,8 @@ impl Stage for MerkleStage { .map_err(|e| StageError::Fatal(Box::new(e)))?; updates.flush(provider.tx_ref())?; - let total_hashed_entries = (provider.tx_ref().entries::()? + - provider.tx_ref().entries::()?) + let total_hashed_entries = (provider.tx_ref().entries::()? + + provider.tx_ref().entries::()?) 
as u64; let entities_checkpoint = EntitiesCheckpoint { @@ -273,8 +273,8 @@ impl Stage for MerkleStage { let mut entities_checkpoint = input.checkpoint.entities_stage_checkpoint().unwrap_or(EntitiesCheckpoint { processed: 0, - total: (tx.entries::()? + - tx.entries::()?) as u64, + total: (tx.entries::()? + + tx.entries::()?) as u64, }); if input.unwind_to == 0 { @@ -385,8 +385,8 @@ mod tests { done: true }) if block_number == previous_stage && processed == total && total == ( - runner.db.table::().unwrap().len() + - runner.db.table::().unwrap().len() + runner.db.table::().unwrap().len() + + runner.db.table::().unwrap().len() ) as u64 ); @@ -425,8 +425,8 @@ mod tests { done: true }) if block_number == previous_stage && processed == total && total == ( - runner.db.table::().unwrap().len() + - runner.db.table::().unwrap().len() + runner.db.table::().unwrap().len() + + runner.db.table::().unwrap().len() ) as u64 ); @@ -506,8 +506,8 @@ mod tests { // Calculate state root let root = self.db.query(|tx| { let mut accounts = BTreeMap::default(); - let mut accounts_cursor = tx.cursor_read::()?; - let mut storage_cursor = tx.cursor_dup_read::()?; + let mut accounts_cursor = tx.cursor_read::()?; + let mut storage_cursor = tx.cursor_dup_read::()?; for entry in accounts_cursor.walk_range(..)? { let (key, account) = entry?; let mut storage_entries = Vec::new(); @@ -560,9 +560,9 @@ mod tests { self.db .commit(|tx| { let mut storage_changesets_cursor = - tx.cursor_dup_read::().unwrap(); + tx.cursor_dup_read::().unwrap(); let mut storage_cursor = - tx.cursor_dup_write::().unwrap(); + tx.cursor_dup_write::().unwrap(); let mut tree: BTreeMap> = BTreeMap::new(); @@ -596,7 +596,7 @@ mod tests { } let mut changeset_cursor = - tx.cursor_dup_write::().unwrap(); + tx.cursor_dup_write::().unwrap(); let mut rev_changeset_walker = changeset_cursor.walk_back(None).unwrap(); while let Some((block_number, account_before_tx)) = @@ -607,13 +607,13 @@ mod tests { } if let Some(acc) = account_before_tx.info { - tx.put::( + tx.put::( keccak256(account_before_tx.address), acc, ) .unwrap(); } else { - tx.delete::( + tx.delete::( keccak256(account_before_tx.address), None, ) diff --git a/crates/stages/src/stages/mod.rs b/crates/stages/src/stages/mod.rs index 6e63083d753..a48b8d2d516 100644 --- a/crates/stages/src/stages/mod.rs +++ b/crates/stages/src/stages/mod.rs @@ -47,7 +47,7 @@ mod tests { tables, test_utils::TempDatabase, transaction::{DbTx, DbTxMut}, - AccountHistory, DatabaseEnv, + AccountsHistory, DatabaseEnv, }; use reth_interfaces::test_utils::generators::{self, random_block}; use reth_node_ethereum::EthEvmConfig; @@ -165,8 +165,8 @@ mod tests { assert!(acc_indexing_stage.execute(&provider, input).is_err()); } else { acc_indexing_stage.execute(&provider, input).unwrap(); - let mut account_history: Cursor = - provider.tx_ref().cursor_read::().unwrap(); + let mut account_history: Cursor = + provider.tx_ref().cursor_read::().unwrap(); assert_eq!(account_history.walk(None).unwrap().count(), expect_num_acc_changesets); } @@ -183,7 +183,7 @@ mod tests { storage_indexing_stage.execute(&provider, input).unwrap(); let mut storage_history = - provider.tx_ref().cursor_read::().unwrap(); + provider.tx_ref().cursor_read::().unwrap(); assert_eq!( storage_history.walk(None).unwrap().count(), expect_num_storage_changesets diff --git a/crates/stages/src/stages/sender_recovery.rs b/crates/stages/src/stages/sender_recovery.rs index a758b9b6bc0..fcbe313748d 100644 --- a/crates/stages/src/stages/sender_recovery.rs +++ 
b/crates/stages/src/stages/sender_recovery.rs @@ -22,7 +22,7 @@ use tracing::*; /// The sender recovery stage iterates over existing transactions, /// recovers the transaction signer and stores them -/// in [`TxSenders`][reth_db::tables::TxSenders] table. +/// in the [`TransactionSenders`][reth_db::tables::TransactionSenders] table. #[derive(Clone, Debug)] pub struct SenderRecoveryStage { /// The size of inserted items after which the control @@ -51,9 +51,8 @@ impl Stage for SenderRecoveryStage { /// Retrieve the range of transactions to iterate over by querying /// [`BlockBodyIndices`][reth_db::tables::BlockBodyIndices], - /// collect transactions within that range, - /// recover signer for each transaction and store entries in - /// the [`TxSenders`][reth_db::tables::TxSenders] table. + /// collect transactions within that range, recover the signer for each transaction and store + /// entries in the [`TransactionSenders`][reth_db::tables::TransactionSenders] table. fn execute( &mut self, provider: &DatabaseProviderRW, @@ -80,7 +79,7 @@ impl Stage for SenderRecoveryStage { let tx = provider.tx_ref(); // Acquire the cursor for inserting elements - let mut senders_cursor = tx.cursor_write::()?; + let mut senders_cursor = tx.cursor_write::()?; // Acquire the cursor over the transactions let mut tx_cursor = tx.cursor_read::>()?; @@ -135,7 +134,7 @@ impl Stage for SenderRecoveryStage { SenderRecoveryStageError::FailedRecovery(err) => { // get the block number for the bad transaction let block_number = tx - .get::(err.tx)? + .get::(err.tx)? .ok_or(ProviderError::BlockNumberForTransactionIndexNotFound)?; // fetch the sealed header so we can use it in the sender recovery @@ -178,7 +177,7 @@ impl Stage for SenderRecoveryStage { .block_body_indices(unwind_to)? .ok_or(ProviderError::BlockBodyIndicesNotFound(unwind_to))? .last_tx_num(); - provider.unwind_table_by_num::(latest_tx_id)?; + provider.unwind_table_by_num::(latest_tx_id)?; Ok(UnwindOutput { checkpoint: StageCheckpoint::new(unwind_to) @@ -219,10 +218,11 @@ fn stage_checkpoint( .and_then(|checkpoint| checkpoint.tx_number) .unwrap_or_default(); Ok(EntitiesCheckpoint { - // If `TxSenders` table was pruned, we will have a number of entries in it not matching - // the actual number of processed transactions. To fix that, we add the number of pruned - // `TxSenders` entries. - processed: provider.tx_ref().entries::()? as u64 + pruned_entries, + // If the `TransactionSenders` table was pruned, we will have a number of entries in it not + // matching the actual number of processed transactions. To fix that, we add the + // number of pruned `TransactionSenders` entries. + processed: provider.tx_ref().entries::()? as u64 + + pruned_entries, total: provider.tx_ref().entries::()? as u64, }) } @@ -353,7 +353,8 @@ mod tests { ExecOutput { checkpoint: StageCheckpoint::new(expected_progress).with_entities_stage_checkpoint( EntitiesCheckpoint { - processed: runner.db.table::().unwrap().len() as u64, + processed: runner.db.table::().unwrap().len() + as u64, total: total_transactions } ), @@ -455,10 +456,11 @@ mod tests { /// # Panics /// - /// 1. If there are any entries in the [tables::TxSenders] table above a given block number. + /// 1. If there are any entries in the [tables::TransactionSenders] table above a given + /// block number. /// - /// 2. If the is no requested block entry in the bodies table, but [tables::TxSenders] is - /// not empty. + /// 2. If there is no requested block entry in the bodies table, but + /// [tables::TransactionSenders] is not empty.
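// --- editor's example (not part of the patch) --------------------------------
// A minimal sketch of what this stage enables: fetching a sender with one
// point read instead of an ECDSA signature recovery. The helper function is
// hypothetical; `tables::TransactionSenders` and `DbTx::get` are real.
use reth_db::{tables, transaction::DbTx, DatabaseError};
use reth_primitives::{Address, TxNumber};

/// Hypothetical helper: look up the pre-recovered sender of a transaction.
fn recovered_sender<TX: DbTx>(tx: &TX, id: TxNumber) -> Result<Option<Address>, DatabaseError> {
    // `None` here means the entry was pruned or the stage has not run yet.
    tx.get::<tables::TransactionSenders>(id)
}
// ------------------------------------------------------------------------------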
fn ensure_no_senders_by_block(&self, block: BlockNumber) -> Result<(), TestRunnerError> { let body_result = self .db .factory .provider()? .block_body_indices(block)? .ok_or(ProviderError::BlockBodyIndicesNotFound(block)); match body_result { - Ok(body) => self - .db - .ensure_no_entry_above::(body.last_tx_num(), |key| key)?, + Ok(body) => self.db.ensure_no_entry_above::( + body.last_tx_num(), + |key| key, + )?, Err(_) => { - assert!(self.db.table_is_empty::()?); + assert!(self.db.table_is_empty::()?); } }; diff --git a/crates/stages/src/stages/total_difficulty.rs b/crates/stages/src/stages/total_difficulty.rs index 6e2c152c4a3..eccac5181e7 100644 --- a/crates/stages/src/stages/total_difficulty.rs +++ b/crates/stages/src/stages/total_difficulty.rs @@ -18,8 +18,8 @@ use tracing::*; /// The total difficulty stage. /// /// This stage walks over inserted headers and computes total difficulty -/// at each block. The entries are inserted into [`HeaderTD`][reth_db::tables::HeaderTD] -/// table. +/// at each block. The entries are inserted into the +/// [`HeaderTerminalDifficulties`][reth_db::tables::HeaderTerminalDifficulties] table. #[derive(Debug, Clone)] pub struct TotalDifficultyStage { /// Consensus client implementation @@ -64,7 +64,7 @@ impl Stage for TotalDifficultyStage { debug!(target: "sync::stages::total_difficulty", start_block, end_block, "Commencing sync"); // Acquire cursor over total difficulty and headers tables - let mut cursor_td = tx.cursor_write::()?; + let mut cursor_td = tx.cursor_write::()?; let mut cursor_headers = tx.cursor_read::()?; // Get latest total difficulty @@ -105,7 +105,7 @@ impl Stage for TotalDifficultyStage { ) -> Result { let (_, unwind_to, _) = input.unwind_block_range_with_threshold(self.commit_threshold); - provider.unwind_table_by_num::(unwind_to)?; + provider.unwind_table_by_num::(unwind_to)?; Ok(UnwindOutput { checkpoint: StageCheckpoint::new(unwind_to) @@ -118,7 +118,7 @@ fn stage_checkpoint( provider: &DatabaseProviderRW, ) -> Result { Ok(EntitiesCheckpoint { - processed: provider.tx_ref().entries::()? as u64, + processed: provider.tx_ref().entries::()? as u64, total: provider.tx_ref().entries::()? as u64, }) } @@ -234,12 +234,15 @@ mod tests { self.db.insert_headers(std::iter::once(&head))?; self.db.commit(|tx| { let td: U256 = tx - .cursor_read::()? + .cursor_read::()? .last()? .map(|(_, v)| v) .unwrap_or_default() .into(); - tx.put::(head.number, (td + head.difficulty).into())?; + tx.put::( + head.number, + (td + head.difficulty).into(), + )?; Ok(()) })?; @@ -299,7 +302,8 @@ mod tests { impl TotalDifficultyTestRunner { fn check_no_td_above(&self, block: BlockNumber) -> Result<(), TestRunnerError> { - self.db.ensure_no_entry_above::(block, |num| num)?; + self.db + .ensure_no_entry_above::(block, |num| num)?; Ok(()) } diff --git a/crates/stages/src/stages/tx_lookup.rs b/crates/stages/src/stages/tx_lookup.rs index d5c9202bab7..63b6527d5a5 100644 --- a/crates/stages/src/stages/tx_lookup.rs +++ b/crates/stages/src/stages/tx_lookup.rs @@ -21,7 +21,8 @@ use tracing::*; /// /// This stage walks over the bodies table, and sets the transaction hash of each transaction in a /// block to the corresponding `BlockNumber` at each block. This is written to the -/// [`tables::TxHashNumber`] This is used for looking up changesets via the transaction hash. +/// [`tables::TransactionHashNumbers`] table. This is used for looking up changesets via the +/// transaction hash.
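// --- editor's example (not part of the patch) --------------------------------
// A sketch of the two-step lookup this stage serves: hash -> number through
// `TransactionHashNumbers`, then number -> body through `Transactions`. The
// helper function is hypothetical; both tables and `DbTx::get` are real.
use reth_db::{tables, transaction::DbTx, DatabaseError};
use reth_primitives::{TransactionSignedNoHash, TxHash};

/// Hypothetical helper: resolve a transaction body from its hash.
fn transaction_by_hash<TX: DbTx>(
    tx: &TX,
    hash: TxHash,
) -> Result<Option<TransactionSignedNoHash>, DatabaseError> {
    match tx.get::<tables::TransactionHashNumbers>(hash)? {
        // The stage writes this mapping for every stored transaction.
        Some(tx_number) => tx.get::<tables::Transactions>(tx_number),
        None => Ok(None),
    }
}
// ------------------------------------------------------------------------------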
#[derive(Debug, Clone)] pub struct TransactionLookupStage { /// The number of lookup entries to commit at once @@ -98,7 +99,7 @@ impl Stage for TransactionLookupStage { tx_list.par_sort_unstable_by(|txa, txb| txa.0.cmp(&txb.0)); let tx = provider.tx_ref(); - let mut txhash_cursor = tx.cursor_write::()?; + let mut txhash_cursor = tx.cursor_write::()?; // If the last inserted element in the database is equal or bigger than the first // in our set, then we need to insert inside the DB. If it is smaller than the last @@ -137,7 +138,7 @@ impl Stage for TransactionLookupStage { // Cursors to unwind tx hash to number let mut body_cursor = tx.cursor_read::()?; - let mut tx_hash_number_cursor = tx.cursor_write::()?; + let mut tx_hash_number_cursor = tx.cursor_write::()?; let mut transaction_cursor = tx.cursor_read::()?; let mut rev_walker = body_cursor.walk_back(Some(*range.end()))?; while let Some((number, body)) = rev_walker.next().transpose()? { @@ -173,10 +174,11 @@ fn stage_checkpoint( .map(|tx_number| tx_number + 1) .unwrap_or_default(); Ok(EntitiesCheckpoint { - // If `TxHashNumber` table was pruned, we will have a number of entries in it not matching - // the actual number of processed transactions. To fix that, we add the number of pruned - // `TxHashNumber` entries. - processed: provider.tx_ref().entries::()? as u64 + pruned_entries, + // If the `TransactionHashNumbers` table was pruned, we will have a number of entries in it + // not matching the actual number of processed transactions. To fix that, we add the + // number of pruned `TransactionHashNumbers` entries. + processed: provider.tx_ref().entries::()? as u64 + + pruned_entries, total: provider.tx_ref().entries::()? as u64, }) } @@ -285,7 +287,11 @@ mod tests { ExecOutput { checkpoint: StageCheckpoint::new(expected_progress).with_entities_stage_checkpoint( EntitiesCheckpoint { - processed: runner.db.table::().unwrap().len() as u64, + processed: runner + .db + .table::() + .unwrap() + .len() as u64, total: total_txs } ), @@ -432,11 +438,11 @@ mod tests { /// # Panics /// - /// 1. If there are any entries in the [tables::TxHashNumber] table above a given block - /// number. + /// 1. If there are any entries in the [tables::TransactionHashNumbers] table above a given + /// block number. /// - /// 2. If the is no requested block entry in the bodies table, but [tables::TxHashNumber] is - /// not empty. + /// 2. If there is no requested block entry in the bodies table, but + /// [tables::TransactionHashNumbers] is not empty. fn ensure_no_hash_by_block(&self, number: BlockNumber) -> Result<(), TestRunnerError> { let body_result = self .db .factory .provider()? .block_body_indices(number)? .ok_or(ProviderError::BlockBodyIndicesNotFound(number)); match body_result { - Ok(body) => self.db.ensure_no_entry_above_by_value::( - body.last_tx_num(), - |key| key, - )?, + Ok(body) => { + self.db.ensure_no_entry_above_by_value::( + body.last_tx_num(), + |key| key, + )? + } Err(_) => { - assert!(self.db.table_is_empty::()?); + assert!(self.db.table_is_empty::()?); } }; diff --git a/crates/stages/src/test_utils/test_db.rs b/crates/stages/src/test_utils/test_db.rs index 54af31db0cc..7115a7c7722 100644 --- a/crates/stages/src/test_utils/test_db.rs +++ b/crates/stages/src/test_utils/test_db.rs @@ -142,7 +142,7 @@ impl TestStageDB { headers.into_iter().try_for_each(|header| { Self::insert_header(tx, header)?; td += header.difficulty; - Ok(tx.put::(header.number, td.into())?) + Ok(tx.put::(header.number, td.into())?)
}) }) } @@ -167,7 +167,7 @@ impl TestStageDB { }; if !block.body.is_empty() { - tx.put::( + tx.put::( block_body_indices.last_tx_num(), block.number, )?; @@ -190,7 +190,7 @@ impl TestStageDB { self.commit(|tx| { tx_hash_numbers.into_iter().try_for_each(|(tx_hash, tx_num)| { // Insert into tx hash numbers table. - Ok(tx.put::(tx_hash, tx_num)?) + Ok(tx.put::(tx_hash, tx_num)?) }) }) } @@ -215,7 +215,7 @@ impl TestStageDB { self.commit(|tx| { transaction_senders.into_iter().try_for_each(|(tx_num, sender)| { // Insert into receipts table. - Ok(tx.put::(tx_num, sender)?) + Ok(tx.put::(tx_num, sender)?) }) }) } @@ -232,7 +232,7 @@ impl TestStageDB { // Insert into account tables. tx.put::(address, account)?; - tx.put::(hashed_address, account)?; + tx.put::(hashed_address, account)?; // Insert into storage tables. storage.into_iter().filter(|e| e.value != U256::ZERO).try_for_each(|entry| { @@ -248,7 +248,7 @@ impl TestStageDB { } cursor.upsert(address, entry)?; - let mut cursor = tx.cursor_dup_write::()?; + let mut cursor = tx.cursor_dup_write::()?; if cursor .seek_by_key_subkey(hashed_address, hashed_entry.key)? .filter(|e| e.key == hashed_entry.key) @@ -279,7 +279,7 @@ impl TestStageDB { changeset.into_iter().try_for_each(|(address, old_account, old_storage)| { let block = offset + block as u64; // Insert into account changeset. - tx.put::( + tx.put::( block, AccountBeforeTx { address, info: Some(old_account) }, )?; @@ -288,7 +288,7 @@ impl TestStageDB { // Insert into storage changeset. old_storage.into_iter().try_for_each(|entry| { - Ok(tx.put::(block_address, entry)?) + Ok(tx.put::(block_address, entry)?) }) }) }) diff --git a/crates/storage/db/benches/criterion.rs b/crates/storage/db/benches/criterion.rs index 39f9dc164f4..54dca69b211 100644 --- a/crates/storage/db/benches/criterion.rs +++ b/crates/storage/db/benches/criterion.rs @@ -23,12 +23,12 @@ pub fn db(c: &mut Criterion) { group.warm_up_time(std::time::Duration::from_millis(200)); measure_table_db::(&mut group); - measure_table_db::(&mut group); + measure_table_db::(&mut group); measure_table_db::(&mut group); measure_table_db::(&mut group); measure_table_db::(&mut group); measure_table_db::(&mut group); - measure_table_db::(&mut group); + measure_table_db::(&mut group); measure_table_db::(&mut group); measure_dupsort_db::(&mut group); measure_table_db::(&mut group); @@ -40,12 +40,12 @@ pub fn serialization(c: &mut Criterion) { group.warm_up_time(std::time::Duration::from_millis(200)); measure_table_serialization::(&mut group); - measure_table_serialization::(&mut group); + measure_table_serialization::(&mut group); measure_table_serialization::(&mut group); measure_table_serialization::(&mut group); measure_table_serialization::(&mut group); measure_table_serialization::(&mut group); - measure_table_serialization::(&mut group); + measure_table_serialization::(&mut group); measure_table_serialization::(&mut group); measure_table_serialization::(&mut group); measure_table_serialization::(&mut group); diff --git a/crates/storage/db/benches/hash_keys.rs b/crates/storage/db/benches/hash_keys.rs index 1266b8c8199..5376bf5040c 100644 --- a/crates/storage/db/benches/hash_keys.rs +++ b/crates/storage/db/benches/hash_keys.rs @@ -9,7 +9,7 @@ use proptest::{ strategy::{Strategy, ValueTree}, test_runner::TestRunner, }; -use reth_db::{cursor::DbCursorRW, TxHashNumber}; +use reth_db::{cursor::DbCursorRW, TransactionHashNumbers}; use std::collections::HashSet; criterion_group! 
{ @@ -34,7 +34,7 @@ pub fn hash_keys(c: &mut Criterion) { group.sample_size(10); for size in [10_000, 100_000, 1_000_000] { - measure_table_insertion::(&mut group, size); + measure_table_insertion::(&mut group, size); } } diff --git a/crates/storage/db/benches/iai.rs b/crates/storage/db/benches/iai.rs index cd153774361..9079933511c 100644 --- a/crates/storage/db/benches/iai.rs +++ b/crates/storage/db/benches/iai.rs @@ -79,12 +79,12 @@ macro_rules! impl_iai { impl_iai!( CanonicalHeaders, - HeaderTD, + HeaderTerminalDifficulties, HeaderNumbers, Headers, BlockBodyIndices, BlockOmmers, - TxHashNumber, + TransactionHashNumbers, Transactions, PlainStorageState, PlainAccountState diff --git a/crates/storage/db/src/implementation/mdbx/mod.rs b/crates/storage/db/src/implementation/mdbx/mod.rs index cb0bcce08f3..ee93e5e4838 100644 --- a/crates/storage/db/src/implementation/mdbx/mod.rs +++ b/crates/storage/db/src/implementation/mdbx/mod.rs @@ -360,10 +360,12 @@ mod tests { abstraction::table::{Encode, Table}, cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO, DbDupCursorRW, ReverseWalker, Walker}, models::{AccountBeforeTx, ShardedKey}, - tables::{AccountHistory, CanonicalHeaders, Headers, PlainAccountState, PlainStorageState}, + tables::{ + AccountsHistory, CanonicalHeaders, Headers, PlainAccountState, PlainStorageState, + }, test_utils::*, transaction::{DbTx, DbTxMut}, - AccountChangeSet, + AccountChangeSets, }; use reth_interfaces::db::{DatabaseWriteError, DatabaseWriteOperation}; use reth_libmdbx::Error; @@ -519,24 +521,24 @@ mod tests { let address2 = Address::with_last_byte(2); let tx = db.tx_mut().expect(ERROR_INIT_TX); - tx.put::(0, AccountBeforeTx { address: address0, info: None }) + tx.put::(0, AccountBeforeTx { address: address0, info: None }) .expect(ERROR_PUT); - tx.put::(0, AccountBeforeTx { address: address1, info: None }) + tx.put::(0, AccountBeforeTx { address: address1, info: None }) .expect(ERROR_PUT); - tx.put::(0, AccountBeforeTx { address: address2, info: None }) + tx.put::(0, AccountBeforeTx { address: address2, info: None }) .expect(ERROR_PUT); - tx.put::(1, AccountBeforeTx { address: address0, info: None }) + tx.put::(1, AccountBeforeTx { address: address0, info: None }) .expect(ERROR_PUT); - tx.put::(1, AccountBeforeTx { address: address1, info: None }) + tx.put::(1, AccountBeforeTx { address: address1, info: None }) .expect(ERROR_PUT); - tx.put::(1, AccountBeforeTx { address: address2, info: None }) + tx.put::(1, AccountBeforeTx { address: address2, info: None }) .expect(ERROR_PUT); - tx.put::(2, AccountBeforeTx { address: address0, info: None }) // <- should not be returned by the walker + tx.put::(2, AccountBeforeTx { address: address0, info: None }) // <- should not be returned by the walker .expect(ERROR_PUT); tx.commit().expect(ERROR_COMMIT); let tx = db.tx().expect(ERROR_INIT_TX); - let mut cursor = tx.cursor_read::().unwrap(); + let mut cursor = tx.cursor_read::().unwrap(); let entries = cursor.walk_range(..).unwrap().collect::, _>>().unwrap(); assert_eq!(entries.len(), 7); @@ -933,7 +935,7 @@ mod tests { let transition_id = 2; let tx = db.tx_mut().expect(ERROR_INIT_TX); - let mut cursor = tx.cursor_write::().unwrap(); + let mut cursor = tx.cursor_write::().unwrap(); vec![0, 1, 3, 4, 5] .into_iter() .try_for_each(|val| { @@ -948,7 +950,7 @@ mod tests { // APPEND DUP & APPEND let subkey_to_append = 2; let tx = db.tx_mut().expect(ERROR_INIT_TX); - let mut cursor = tx.cursor_write::().unwrap(); + let mut cursor = tx.cursor_write::().unwrap(); assert_eq!( cursor.append_dup( 
transition_id, @@ -957,7 +959,7 @@ mod tests { Err(DatabaseWriteError { info: Error::KeyMismatch.into(), operation: DatabaseWriteOperation::CursorAppendDup, - table_name: AccountChangeSet::NAME, + table_name: AccountChangeSets::NAME, key: transition_id.encode().into(), } .into()) @@ -970,7 +972,7 @@ mod tests { Err(DatabaseWriteError { info: Error::KeyMismatch.into(), operation: DatabaseWriteOperation::CursorAppend, - table_name: AccountChangeSet::NAME, + table_name: AccountChangeSets::NAME, key: (transition_id - 1).encode().into(), } .into()) @@ -1159,13 +1161,14 @@ mod tests { let key = ShardedKey::new(real_key, i * 100); let list: IntegerList = vec![i * 100u64].into(); - db.update(|tx| tx.put::(key.clone(), list.clone()).expect("")).unwrap(); + db.update(|tx| tx.put::(key.clone(), list.clone()).expect("")) + .unwrap(); } // Seek value with non existing key. { let tx = db.tx().expect(ERROR_INIT_TX); - let mut cursor = tx.cursor_read::().unwrap(); + let mut cursor = tx.cursor_read::().unwrap(); // It will seek the one greater or equal to the query. Since we have `Address | 100`, // `Address | 200` in the database and we're querying `Address | 150` it will return us @@ -1183,7 +1186,7 @@ mod tests { // Seek greatest index { let tx = db.tx().expect(ERROR_INIT_TX); - let mut cursor = tx.cursor_read::().unwrap(); + let mut cursor = tx.cursor_read::().unwrap(); // It will seek the MAX value of transition index and try to use prev to get first // biggers. diff --git a/crates/storage/db/src/snapshot/masks.rs b/crates/storage/db/src/snapshot/masks.rs index aecf151ebd8..2bc7bb416a7 100644 --- a/crates/storage/db/src/snapshot/masks.rs +++ b/crates/storage/db/src/snapshot/masks.rs @@ -3,20 +3,20 @@ use crate::{ add_snapshot_mask, snapshot::mask::{ColumnSelectorOne, ColumnSelectorTwo, HeaderMask}, table::Table, - CanonicalHeaders, HeaderTD, Receipts, Transactions, + CanonicalHeaders, HeaderTerminalDifficulties, Receipts, Transactions, }; use reth_primitives::{BlockHash, Header}; // HEADER MASKS add_snapshot_mask!(HeaderMask, Header, 0b001); -add_snapshot_mask!(HeaderMask, ::Value, 0b010); +add_snapshot_mask!(HeaderMask, ::Value, 0b010); add_snapshot_mask!(HeaderMask, BlockHash, 0b100); add_snapshot_mask!(HeaderMask, Header, BlockHash, 0b101); add_snapshot_mask!( HeaderMask, - ::Value, + ::Value, ::Value, 0b110 ); diff --git a/crates/storage/db/src/tables/mod.rs b/crates/storage/db/src/tables/mod.rs index 3ccce8a9e1b..3f3c6bf176a 100644 --- a/crates/storage/db/src/tables/mod.rs +++ b/crates/storage/db/src/tables/mod.rs @@ -246,7 +246,7 @@ tables! { table CanonicalHeaders; /// Stores the total difficulty from a block header. - table HeaderTD; + table HeaderTerminalDifficulties; /// Stores the block number corresponding to a header. table HeaderNumbers; @@ -269,12 +269,12 @@ tables! { table Transactions; /// Stores the mapping of the transaction hash to the transaction number. - table TxHashNumber; + table TransactionHashNumbers; /// Stores the mapping of transaction number to the blocks number. /// /// The key is the highest transaction ID in the block. - table TransactionBlock; + table TransactionBlocks; /// Canonical only Stores transaction receipts. table Receipts; @@ -309,7 +309,7 @@ tables! { /// * If there were no shard we would get `None` entry or entry of different storage key. 
/// /// Code example can be found in `reth_provider::HistoricalStateProviderRef` - table AccountHistory, Value = BlockNumberList>; + table AccountsHistory, Value = BlockNumberList>; /// Stores pointers to block number changesets with changes for each storage key. /// @@ -329,29 +329,29 @@ tables! { /// * If there were no shard we would get `None` entry or entry of different storage key. /// /// Code example can be found in `reth_provider::HistoricalStateProviderRef` - table StorageHistory; + table StoragesHistory; /// Stores the state of an account before a certain transaction changed it. /// A change of state can be: the account is created, selfdestructed, touched while empty, /// or its balance or nonce changed. - table AccountChangeSet; + table AccountChangeSets; /// Stores the state of a storage key before a certain transaction changed it. /// If [`StorageEntry::value`] is zero, this means the storage entry did not previously exist /// and needs to be removed. - table StorageChangeSet; + table StorageChangeSets; /// Stores the current state of an [`Account`] indexed with `keccak256Address`. /// This table is in preparation for merkleization and calculation of state root. /// We are saving whole account data as it is needed for partial update when /// part of storage is changed. Benefit for merkleization is that hashed addresses are sorted. - table HashedAccount; + table HashedAccounts; /// Stores the current storage values indexed with `keccak256Address` and /// hash of storage key `keccak256key`. /// This table is in preparation for merkleization and calculation of state root. /// Benefit for merkleization is that hashed addresses/keys are sorted. - table HashedStorage; + table HashedStorages; /// Stores the current state's Merkle Patricia Tree. table AccountsTrie; @@ -362,13 +362,13 @@ tables! { /// Stores the transaction sender for each canonical transaction. /// It is needed to speed up the execution stage and allows fetching the signer without doing /// transaction signature recovery - table TxSenders; + table TransactionSenders; /// Stores the highest synced block number and stage-specific checkpoint of each stage. - table SyncStage; + table StageCheckpoints; /// Stores arbitrary data to keep track of a stage's first-sync progress. - table SyncStageProgress>; + table StageCheckpointProgresses>; /// Stores the highest pruned block number and prune mode of each prune segment. table PruneCheckpoints; diff --git a/crates/storage/db/src/tables/models/accounts.rs b/crates/storage/db/src/tables/models/accounts.rs index 767f321d902..9b926c0203b 100644 --- a/crates/storage/db/src/tables/models/accounts.rs +++ b/crates/storage/db/src/tables/models/accounts.rs @@ -11,7 +11,7 @@ use reth_codecs::{derive_arbitrary, Compact}; use reth_primitives::{Account, Address, BlockNumber, Buf}; use serde::{Deserialize, Serialize}; -/// Account as it is saved inside [`AccountChangeSet`][crate::tables::AccountChangeSet]. +/// Account as it is saved inside [`AccountChangeSets`][crate::tables::AccountChangeSets]. /// /// [`Address`] is the subkey. #[derive_arbitrary(compact)] @@ -57,7 +57,7 @@ impl Compact for AccountBeforeTx { } /// [`BlockNumber`] concatenated with [`Address`]. Used as the key for -/// [`StorageChangeSet`](crate::tables::StorageChangeSet) +/// [`StorageChangeSets`](crate::tables::StorageChangeSets). /// /// Since it's used as a key, it isn't compressed when encoding it.
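// --- editor's example (not part of the patch) --------------------------------
// A sketch of why this uncompressed key layout matters: because the block
// number is the key prefix, a block range maps to one contiguous key range of
// the renamed `StorageChangeSets` table. The helper function and its block
// range are hypothetical; `BlockNumberAddress::range` and `walk_range` are the
// APIs used elsewhere in this diff.
use reth_db::{
    cursor::DbCursorRO, models::BlockNumberAddress, tables, transaction::DbTx, DatabaseError,
};

/// Hypothetical helper: count storage-changeset entries for blocks 1..=100.
fn count_storage_changes<TX: DbTx>(tx: &TX) -> Result<usize, DatabaseError> {
    let mut cursor = tx.cursor_read::<tables::StorageChangeSets>()?;
    // `walk_range` visits keys in their sorted, encoded order.
    Ok(cursor.walk_range(BlockNumberAddress::range(1..=100))?.count())
}
// ------------------------------------------------------------------------------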
#[derive( diff --git a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs b/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs index c87c6ede80a..b6be30d91b5 100644 --- a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs +++ b/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs @@ -426,7 +426,7 @@ mod tests { // Check change set let mut changeset_cursor = provider .tx_ref() - .cursor_dup_read::() + .cursor_dup_read::() .expect("Could not open changeset cursor"); assert_eq!( changeset_cursor.seek_exact(1).expect("Could not read account change set"), @@ -594,7 +594,7 @@ mod tests { // Check change set let mut changeset_cursor = provider .tx_ref() - .cursor_dup_read::() + .cursor_dup_read::() .expect("Could not open storage changeset cursor"); assert_eq!( changeset_cursor.seek_exact(BlockNumberAddress((1, address_a))).unwrap(), @@ -861,7 +861,7 @@ mod tests { let mut storage_changeset_cursor = provider .tx_ref() - .cursor_dup_read::() + .cursor_dup_read::() .expect("Could not open plain storage state cursor"); let mut storage_changes = storage_changeset_cursor.walk_range(..).unwrap(); @@ -1069,7 +1069,7 @@ mod tests { let mut storage_changeset_cursor = provider .tx_ref() - .cursor_dup_read::() + .cursor_dup_read::() .expect("Could not open plain storage state cursor"); let range = BlockNumberAddress::range(1..=1); let mut storage_changes = storage_changeset_cursor.walk_range(range).unwrap(); @@ -1138,9 +1138,9 @@ mod tests { db.update(|tx| { for (address, (account, storage)) in prestate.iter() { let hashed_address = keccak256(address); - tx.put::(hashed_address, *account).unwrap(); + tx.put::(hashed_address, *account).unwrap(); for (slot, value) in storage { - tx.put::( + tx.put::( hashed_address, StorageEntry { key: keccak256(slot), value: *value }, ) diff --git a/crates/storage/provider/src/bundle_state/hashed_state_changes.rs b/crates/storage/provider/src/bundle_state/hashed_state_changes.rs index 0c14ec49f53..0ddccd8e71b 100644 --- a/crates/storage/provider/src/bundle_state/hashed_state_changes.rs +++ b/crates/storage/provider/src/bundle_state/hashed_state_changes.rs @@ -17,7 +17,7 @@ impl HashedStateChanges { pub fn write_to_db(self, tx: &TX) -> Result<(), DatabaseError> { // Write hashed account updates. let sorted_accounts = self.0.accounts.into_iter().sorted_unstable_by_key(|(key, _)| *key); - let mut hashed_accounts_cursor = tx.cursor_write::()?; + let mut hashed_accounts_cursor = tx.cursor_write::()?; for (hashed_address, account) in sorted_accounts { if let Some(account) = account { hashed_accounts_cursor.upsert(hashed_address, account)?; @@ -28,7 +28,7 @@ impl HashedStateChanges { // Write hashed storage changes. 
let sorted_storages = self.0.storages.into_iter().sorted_by_key(|(key, _)| *key); - let mut hashed_storage_cursor = tx.cursor_dup_write::()?; + let mut hashed_storage_cursor = tx.cursor_dup_write::()?; for (hashed_address, storage) in sorted_storages { if storage.wiped && hashed_storage_cursor.seek_exact(hashed_address)?.is_some() { hashed_storage_cursor.delete_current_duplicates()?; @@ -74,9 +74,9 @@ mod tests { { let provider_rw = provider_factory.provider_rw().unwrap(); let mut accounts_cursor = - provider_rw.tx_ref().cursor_write::().unwrap(); + provider_rw.tx_ref().cursor_write::().unwrap(); let mut storage_cursor = - provider_rw.tx_ref().cursor_write::().unwrap(); + provider_rw.tx_ref().cursor_write::().unwrap(); for address in addresses { let hashed_address = keccak256(address); @@ -100,13 +100,13 @@ mod tests { let provider = provider_factory.provider().unwrap(); assert_eq!( - provider.tx_ref().get::(destroyed_address_hashed), + provider.tx_ref().get::(destroyed_address_hashed), Ok(None) ); assert_eq!( provider .tx_ref() - .cursor_read::() + .cursor_read::() .unwrap() .seek_by_key_subkey(destroyed_address_hashed, hashed_slot), Ok(None) diff --git a/crates/storage/provider/src/bundle_state/state_reverts.rs b/crates/storage/provider/src/bundle_state/state_reverts.rs index 8b7d5c7c283..87f87403169 100644 --- a/crates/storage/provider/src/bundle_state/state_reverts.rs +++ b/crates/storage/provider/src/bundle_state/state_reverts.rs @@ -32,7 +32,7 @@ impl StateReverts { // Write storage changes tracing::trace!(target: "provider::reverts", "Writing storage changes"); let mut storages_cursor = tx.cursor_dup_write::()?; - let mut storage_changeset_cursor = tx.cursor_dup_write::()?; + let mut storage_changeset_cursor = tx.cursor_dup_write::()?; for (block_index, mut storage_changes) in self.0.storage.into_iter().enumerate() { let block_number = first_block + block_index as BlockNumber; @@ -73,7 +73,7 @@ impl StateReverts { // Write account changes tracing::trace!(target: "provider::reverts", "Writing account changes"); - let mut account_changeset_cursor = tx.cursor_dup_write::()?; + let mut account_changeset_cursor = tx.cursor_dup_write::()?; for (block_index, mut account_block_reverts) in self.0.accounts.into_iter().enumerate() { let block_number = first_block + block_index as BlockNumber; // Sort accounts by address. 
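// --- editor's example (not part of the patch) --------------------------------
// A sketch of the write pattern used above: one dup-sorted `AccountChangeSets`
// entry per touched account per block, appended in subkey (address) order.
// The standalone helper is hypothetical; `cursor_dup_write` and `append_dup`
// are the reth_db cursor APIs this file relies on.
use reth_db::{
    cursor::DbDupCursorRW, models::AccountBeforeTx, tables, transaction::DbTxMut, DatabaseError,
};
use reth_primitives::BlockNumber;

/// Hypothetical helper: record the pre-state of accounts touched in `block`.
fn write_account_reverts<TX: DbTxMut>(
    tx: &TX,
    block: BlockNumber,
    mut touched: Vec<AccountBeforeTx>,
) -> Result<(), DatabaseError> {
    // `append_dup` requires duplicates sorted by subkey, here the address.
    touched.sort_unstable_by_key(|entry| entry.address);
    let mut cursor = tx.cursor_dup_write::<tables::AccountChangeSets>()?;
    for entry in touched {
        cursor.append_dup(block, entry)?;
    }
    Ok(())
}
// ------------------------------------------------------------------------------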
diff --git a/crates/storage/provider/src/providers/database/metrics.rs b/crates/storage/provider/src/providers/database/metrics.rs index 8fb3a1cd3b0..c103ae5f6d0 100644 --- a/crates/storage/provider/src/providers/database/metrics.rs +++ b/crates/storage/provider/src/providers/database/metrics.rs @@ -50,14 +50,14 @@ pub(crate) enum Action { InsertCanonicalHeaders, InsertHeaders, InsertHeaderNumbers, - InsertHeaderTD, + InsertHeaderTerminalDifficulties, InsertBlockOmmers, - InsertTxSenders, + InsertTransactionSenders, InsertTransactions, - InsertTxHashNumbers, + InsertTransactionHashNumbers, InsertBlockWithdrawals, InsertBlockBodyIndices, - InsertTransactionBlock, + InsertTransactionBlocks, GetNextTxNum, GetParentTD, @@ -77,14 +77,14 @@ impl Action { Action::InsertCanonicalHeaders => "insert canonical headers", Action::InsertHeaders => "insert headers", Action::InsertHeaderNumbers => "insert header numbers", - Action::InsertHeaderTD => "insert header TD", + Action::InsertHeaderTerminalDifficulties => "insert header TD", Action::InsertBlockOmmers => "insert block ommers", - Action::InsertTxSenders => "insert tx senders", + Action::InsertTransactionSenders => "insert tx senders", Action::InsertTransactions => "insert transactions", - Action::InsertTxHashNumbers => "insert tx hash numbers", + Action::InsertTransactionHashNumbers => "insert transaction hash numbers", Action::InsertBlockWithdrawals => "insert block withdrawals", Action::InsertBlockBodyIndices => "insert block body indices", - Action::InsertTransactionBlock => "insert transaction block", + Action::InsertTransactionBlocks => "insert transaction blocks", Action::GetNextTxNum => "get next tx num", Action::GetParentTD => "get parent TD", } diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 61e5ed8df51..06d36237d1e 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -646,7 +646,7 @@ mod tests { Ok(_) ); - let senders = provider.get_or_take::(range.clone()); + let senders = provider.get_or_take::(range.clone()); assert_eq!( senders, Ok(range diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 7e95bb8594e..7b7366c571a 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -200,9 +200,9 @@ where continue } else if block_number <= sharded_key.as_ref().highest_block_number { // Filter out all elements greater than block number. - return Ok(list.iter().take_while(|i| *i < block_number).collect::>()); + return Ok(list.iter().take_while(|i| *i < block_number).collect::>()) } else { - return Ok(list.iter().collect::>()); + return Ok(list.iter().collect::>()) } } @@ -383,9 +383,9 @@ impl DatabaseProvider { /// /// 1. Iterate over the [BlockBodyIndices][tables::BlockBodyIndices] table to get all /// the transaction ids. - /// 2. Iterate over the [StorageChangeSet][tables::StorageChangeSet] table - /// and the [AccountChangeSet][tables::AccountChangeSet] tables in reverse order to reconstruct - /// the changesets. + /// 2. Iterate over the [StorageChangeSets][tables::StorageChangeSets] table + /// and the [AccountChangeSets][tables::AccountChangeSets] tables in reverse order to + /// reconstruct the changesets. /// - In order to have both the old and new values in the changesets, we also access the /// plain state tables. 
/// 3. While iterating over the changeset tables, if we encounter a new account or storage slot, @@ -419,8 +419,8 @@ impl DatabaseProvider { let storage_range = BlockNumberAddress::range(range.clone()); let storage_changeset = - self.get_or_take::(storage_range)?; - let account_changeset = self.get_or_take::(range)?; + self.get_or_take::(storage_range)?; + let account_changeset = self.get_or_take::(range)?; // iterate previous value and get plain state value to create changeset // Double option around Account represent if Account state is know (first option) and @@ -600,8 +600,9 @@ impl DatabaseProvider { .map(|(id, tx)| (id, tx.into())) .collect::>(); - let mut senders = - self.get_or_take::(first_transaction..=last_transaction)?; + let mut senders = self.get_or_take::( + first_transaction..=last_transaction, + )?; // Recover senders manually if not found in db // NOTE: Transactions are always guaranteed to be in the database whereas @@ -663,18 +664,18 @@ impl DatabaseProvider { } if TAKE { - // Remove TxHashNumber - let mut tx_hash_cursor = self.tx.cursor_write::()?; + // Remove TransactionHashNumbers + let mut tx_hash_cursor = self.tx.cursor_write::()?; for (_, tx) in transactions.iter() { if tx_hash_cursor.seek_exact(tx.hash())?.is_some() { tx_hash_cursor.delete_current()?; } } - // Remove TransactionBlock index if there are transaction present + // Remove TransactionBlocks index if there are transaction present if !transactions.is_empty() { let tx_id_range = transactions.first().unwrap().0..=transactions.last().unwrap().0; - self.get_or_take::(tx_id_range)?; + self.get_or_take::(tx_id_range)?; } } @@ -731,8 +732,8 @@ impl DatabaseProvider { let block_tx = self.get_take_block_transaction_range::(range.clone())?; if TAKE { - // rm HeaderTD - self.get_or_take::(range)?; + // rm HeaderTerminalDifficulties + self.get_or_take::(range)?; // rm HeaderNumbers let mut header_number_cursor = self.tx.cursor_write::()?; for (_, hash) in block_header_hashes.iter() { @@ -923,7 +924,7 @@ impl DatabaseProvider { // delete old shard so new one can be inserted. self.tx.delete::(shard_key, None)?; let list = list.iter().collect::>(); - return Ok(list); + return Ok(list) } Ok(Vec::new()) } @@ -984,7 +985,7 @@ impl AccountExtReader for DatabaseProvider { range: impl RangeBounds, ) -> ProviderResult> { self.tx - .cursor_read::()? + .cursor_read::()? .walk_range(range)? .map(|entry| { entry.map(|(_, account_before)| account_before.address).map_err(Into::into) @@ -1007,7 +1008,7 @@ impl AccountExtReader for DatabaseProvider { &self, range: RangeInclusive, ) -> ProviderResult>> { - let mut changeset_cursor = self.tx.cursor_read::()?; + let mut changeset_cursor = self.tx.cursor_read::()?; let account_transitions = changeset_cursor.walk_range(range)?.try_fold( BTreeMap::new(), @@ -1029,7 +1030,7 @@ impl ChangeSetReader for DatabaseProvider { ) -> ProviderResult> { let range = block_number..=block_number; self.tx - .cursor_read::()? + .cursor_read::()? .walk_range(range)? .map(|result| -> ProviderResult<_> { let (_, account_before) = result?; @@ -1127,7 +1128,7 @@ impl HeaderProvider for DatabaseProvider { SnapshotSegment::Headers, number, |snapshot| snapshot.header_td_by_number(number), - || Ok(self.tx.get::(number)?.map(|td| td.0)), + || Ok(self.tx.get::(number)?.map(|td| td.0)), ) } @@ -1498,7 +1499,7 @@ impl TransactionsProviderExt for DatabaseProvider { impl TransactionsProvider for DatabaseProvider { fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { - Ok(self.tx.get::(tx_hash)?) 
+ Ok(self.tx.get::(tx_hash)?) } fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { @@ -1539,7 +1540,7 @@ impl TransactionsProvider for DatabaseProvider { &self, tx_hash: TxHash, ) -> ProviderResult> { - let mut transaction_cursor = self.tx.cursor_read::()?; + let mut transaction_cursor = self.tx.cursor_read::()?; if let Some(transaction_id) = self.transaction_id(tx_hash)? { if let Some(tx) = self.transaction_by_id_no_hash(transaction_id)? { let transaction = TransactionSigned { @@ -1579,7 +1580,7 @@ impl TransactionsProvider for DatabaseProvider { } fn transaction_block(&self, id: TxNumber) -> ProviderResult> { - let mut cursor = self.tx.cursor_read::()?; + let mut cursor = self.tx.cursor_read::()?; Ok(cursor.seek(id)?.map(|(_, bn)| bn)) } @@ -1645,11 +1646,11 @@ impl TransactionsProvider for DatabaseProvider { &self, range: impl RangeBounds, ) -> ProviderResult> { - self.cursor_read_collect::(range, Ok).map_err(Into::into) + self.cursor_read_collect::(range, Ok).map_err(Into::into) } fn transaction_sender(&self, id: TxNumber) -> ProviderResult> { - Ok(self.tx.get::(id)?) + Ok(self.tx.get::(id)?) } } @@ -1836,12 +1837,12 @@ impl EvmEnvProvider for DatabaseProvider { impl StageCheckpointReader for DatabaseProvider { fn get_stage_checkpoint(&self, id: StageId) -> ProviderResult> { - Ok(self.tx.get::(id.to_string())?) + Ok(self.tx.get::(id.to_string())?) } /// Get stage checkpoint progress. fn get_stage_checkpoint_progress(&self, id: StageId) -> ProviderResult>> { - Ok(self.tx.get::(id.to_string())?) + Ok(self.tx.get::(id.to_string())?) } } @@ -1852,7 +1853,7 @@ impl StageCheckpointWriter for DatabaseProvider { id: StageId, checkpoint: StageCheckpoint, ) -> ProviderResult<()> { - Ok(self.tx.put::(id.to_string(), checkpoint)?) + Ok(self.tx.put::(id.to_string(), checkpoint)?) } /// Save stage checkpoint progress. @@ -1861,7 +1862,7 @@ impl StageCheckpointWriter for DatabaseProvider { id: StageId, checkpoint: Vec, ) -> ProviderResult<()> { - Ok(self.tx.put::(id.to_string(), checkpoint)?) + Ok(self.tx.put::(id.to_string(), checkpoint)?) } fn update_pipeline_stages( @@ -1870,7 +1871,7 @@ impl StageCheckpointWriter for DatabaseProvider { drop_stage_checkpoint: bool, ) -> ProviderResult<()> { // iterate over all existing stages in the table and update its progress. - let mut cursor = self.tx.cursor_write::()?; + let mut cursor = self.tx.cursor_write::()?; for stage_id in StageId::ALL { let (_, checkpoint) = cursor.seek_exact(stage_id.to_string())?.unwrap_or_default(); cursor.upsert( @@ -1915,7 +1916,7 @@ impl StorageReader for DatabaseProvider { range: RangeInclusive, ) -> ProviderResult>> { self.tx - .cursor_read::()? + .cursor_read::()? .walk_range(BlockNumberAddress::range(range))? // fold all storages and save its old state so we can remove it from HashedStorage // it is needed as it is dup table. @@ -1930,7 +1931,7 @@ impl StorageReader for DatabaseProvider { &self, range: RangeInclusive, ) -> ProviderResult>> { - let mut changeset_cursor = self.tx.cursor_read::()?; + let mut changeset_cursor = self.tx.cursor_read::()?; let storage_changeset_lists = changeset_cursor.walk_range(BlockNumberAddress::range(range))?.try_fold( @@ -1959,7 +1960,7 @@ impl HashingWriter for DatabaseProvider { // changes are applied in the correct order. let hashed_accounts = self .tx - .cursor_read::()? + .cursor_read::()? .walk_range(range)? .map(|entry| entry.map(|(_, e)| (keccak256(e.address), e.info))) .collect::, _>>()? 
@@ -1968,7 +1969,7 @@ impl HashingWriter for DatabaseProvider { .collect::>(); // Apply values to HashedState, and remove the account if it's None. - let mut hashed_accounts_cursor = self.tx.cursor_write::()?; + let mut hashed_accounts_cursor = self.tx.cursor_write::()?; for (hashed_address, account) in &hashed_accounts { if let Some(account) = account { hashed_accounts_cursor.upsert(*hashed_address, *account)?; @@ -1984,7 +1985,7 @@ impl HashingWriter for DatabaseProvider { &self, accounts: impl IntoIterator)>, ) -> ProviderResult>> { - let mut hashed_accounts_cursor = self.tx.cursor_write::()?; + let mut hashed_accounts_cursor = self.tx.cursor_write::()?; let hashed_accounts = accounts.into_iter().map(|(ad, ac)| (keccak256(ad), ac)).collect::>(); for (hashed_address, account) in &hashed_accounts { @@ -2002,7 +2003,7 @@ impl HashingWriter for DatabaseProvider { range: Range, ) -> ProviderResult>> { // Aggregate all block changesets and make list of accounts that have been changed. - let mut changesets = self.tx.cursor_read::()?; + let mut changesets = self.tx.cursor_read::()?; let mut hashed_storages = changesets .walk_range(range)? .map(|entry| { @@ -2015,7 +2016,7 @@ impl HashingWriter for DatabaseProvider { // Apply values to HashedState, and remove the account if it's None. let mut hashed_storage_keys: HashMap> = HashMap::new(); - let mut hashed_storage = self.tx.cursor_dup_write::()?; + let mut hashed_storage = self.tx.cursor_dup_write::()?; for (hashed_address, key, value) in hashed_storages.into_iter().rev() { hashed_storage_keys.entry(hashed_address).or_default().insert(key); @@ -2054,7 +2055,7 @@ impl HashingWriter for DatabaseProvider { (*hashed_address, BTreeSet::from_iter(entries.keys().copied())) })); - let mut hashed_storage_cursor = self.tx.cursor_dup_write::()?; + let mut hashed_storage_cursor = self.tx.cursor_dup_write::()?; // Hash the address and key and apply them to HashedStorage (if Storage is None // just remove it); hashed_storages.into_iter().try_for_each(|(hashed_address, storage)| { @@ -2175,7 +2176,7 @@ impl HistoryWriter for DatabaseProvider { &self, storage_transitions: BTreeMap<(Address, B256), Vec>, ) -> ProviderResult<()> { - self.append_history_index::<_, tables::StorageHistory>( + self.append_history_index::<_, tables::StoragesHistory>( storage_transitions, |(address, storage_key), highest_block_number| { StorageShardedKey::new(address, storage_key, highest_block_number) @@ -2187,7 +2188,10 @@ impl HistoryWriter for DatabaseProvider { &self, account_transitions: BTreeMap>, ) -> ProviderResult<()> { - self.append_history_index::<_, tables::AccountHistory>(account_transitions, ShardedKey::new) + self.append_history_index::<_, tables::AccountsHistory>( + account_transitions, + ShardedKey::new, + ) } fn unwind_storage_history_indices( @@ -2196,7 +2200,7 @@ impl HistoryWriter for DatabaseProvider { ) -> ProviderResult { let mut storage_changesets = self .tx - .cursor_read::()? + .cursor_read::()? .walk_range(range)? 
.map(|entry| { entry.map(|(BlockNumberAddress((bn, address)), storage)| (address, storage.key, bn)) @@ -2204,9 +2208,9 @@ impl HistoryWriter for DatabaseProvider { .collect::, _>>()?; storage_changesets.sort_by_key(|(address, key, _)| (*address, *key)); - let mut cursor = self.tx.cursor_write::()?; + let mut cursor = self.tx.cursor_write::()?; for &(address, storage_key, rem_index) in &storage_changesets { - let partial_shard = unwind_history_shards::<_, tables::StorageHistory, _>( + let partial_shard = unwind_history_shards::<_, tables::StoragesHistory, _>( &mut cursor, StorageShardedKey::last(address, storage_key), rem_index, @@ -2236,16 +2240,16 @@ impl HistoryWriter for DatabaseProvider { ) -> ProviderResult { let mut last_indices = self .tx - .cursor_read::()? + .cursor_read::()? .walk_range(range)? .map(|entry| entry.map(|(index, account)| (account.address, index))) .collect::, _>>()?; last_indices.sort_by_key(|(a, _)| *a); // Unwind the account history index. - let mut cursor = self.tx.cursor_write::()?; + let mut cursor = self.tx.cursor_write::()?; for &(address, rem_index) in &last_indices { - let partial_shard = unwind_history_shards::<_, tables::AccountHistory, _>( + let partial_shard = unwind_history_shards::<_, tables::AccountsHistory, _>( &mut cursor, ShardedKey::last(address), rem_index, @@ -2393,8 +2397,8 @@ impl BlockWriter for DatabaseProvider { parent_ttd + block.difficulty }; - self.tx.put::(block_number, ttd.into())?; - durations_recorder.record_relative(metrics::Action::InsertHeaderTD); + self.tx.put::(block_number, ttd.into())?; + durations_recorder.record_relative(metrics::Action::InsertHeaderTerminalDifficulties); // insert body ommers data if !block.ommers.is_empty() { @@ -2430,7 +2434,7 @@ impl BlockWriter for DatabaseProvider { .is_none() { let start = Instant::now(); - self.tx.put::(next_tx_num, *sender)?; + self.tx.put::(next_tx_num, *sender)?; tx_senders_elapsed += start.elapsed(); } @@ -2455,16 +2459,19 @@ impl BlockWriter for DatabaseProvider { .is_none() { let start = Instant::now(); - self.tx.put::(hash, next_tx_num)?; + self.tx.put::(hash, next_tx_num)?; tx_hash_numbers_elapsed += start.elapsed(); } next_tx_num += 1; } - durations_recorder.record_duration(metrics::Action::InsertTxSenders, tx_senders_elapsed); durations_recorder - .record_duration(metrics::Action::InsertTransactions, transactions_elapsed); + .record_duration(metrics::Action::InsertTransactionSenders, tx_senders_elapsed); durations_recorder - .record_duration(metrics::Action::InsertTxHashNumbers, tx_hash_numbers_elapsed); + .record_duration(metrics::Action::InsertTransactions, transactions_elapsed); + durations_recorder.record_duration( + metrics::Action::InsertTransactionHashNumbers, + tx_hash_numbers_elapsed, + ); if let Some(withdrawals) = block.block.withdrawals { if !withdrawals.is_empty() { @@ -2481,8 +2488,8 @@ impl BlockWriter for DatabaseProvider { durations_recorder.record_relative(metrics::Action::InsertBlockBodyIndices); if !block_indices.is_empty() { - self.tx.put::(block_indices.last_tx_num(), block_number)?; - durations_recorder.record_relative(metrics::Action::InsertTransactionBlock); + self.tx.put::(block_indices.last_tx_num(), block_number)?; + durations_recorder.record_relative(metrics::Action::InsertTransactionBlocks); } debug!( diff --git a/crates/storage/provider/src/providers/snapshot/jar.rs b/crates/storage/provider/src/providers/snapshot/jar.rs index ee1519c9f2b..7766c2906ad 100644 --- a/crates/storage/provider/src/providers/snapshot/jar.rs +++ 
b/crates/storage/provider/src/providers/snapshot/jar.rs @@ -205,12 +205,12 @@ impl<'a> TransactionsProvider for SnapshotJarProvider<'a> { &self, _hash: TxHash, ) -> ProviderResult> { - // Information required on indexing table [`tables::TransactionBlock`] + // Information required on indexing table [`tables::TransactionBlocks`] Err(ProviderError::UnsupportedProvider) } fn transaction_block(&self, _id: TxNumber) -> ProviderResult> { - // Information on indexing table [`tables::TransactionBlock`] + // Information on indexing table [`tables::TransactionBlocks`] Err(ProviderError::UnsupportedProvider) } diff --git a/crates/storage/provider/src/providers/snapshot/mod.rs b/crates/storage/provider/src/providers/snapshot/mod.rs index 7a9327c291b..e1e34522a8e 100644 --- a/crates/storage/provider/src/providers/snapshot/mod.rs +++ b/crates/storage/provider/src/providers/snapshot/mod.rs @@ -47,7 +47,7 @@ mod tests { cursor::DbCursorRO, snapshot::create_snapshot_T1_T2_T3, transaction::{DbTx, DbTxMut}, - CanonicalHeaders, HeaderNumbers, HeaderTD, Headers, RawTable, + CanonicalHeaders, HeaderNumbers, HeaderTerminalDifficulties, Headers, RawTable, }; use reth_interfaces::test_utils::generators::{self, random_header_range}; use reth_primitives::{BlockNumber, B256, U256}; @@ -81,7 +81,7 @@ mod tests { tx.put::(header.number, hash).unwrap(); tx.put::(header.number, header.clone().unseal()).unwrap(); - tx.put::(header.number, td.into()).unwrap(); + tx.put::(header.number, td.into()).unwrap(); tx.put::(hash, header.number).unwrap(); } provider_rw.commit().unwrap(); @@ -117,7 +117,7 @@ mod tests { create_snapshot_T1_T2_T3::< Headers, - HeaderTD, + HeaderTerminalDifficulties, CanonicalHeaders, BlockNumber, SegmentHeader, @@ -148,7 +148,7 @@ mod tests { assert_eq!(header, db_provider.header(&header_hash).unwrap().unwrap()); assert_eq!(header, jar_provider.header(&header_hash).unwrap().unwrap()); - // Compare HeaderTD + // Compare HeaderTerminalDifficulties assert_eq!( db_provider.header_td(&header_hash).unwrap().unwrap(), jar_provider.header_td(&header_hash).unwrap().unwrap() diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index 2eccf7ea710..497e126c254 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -24,11 +24,11 @@ use std::fmt::Debug; /// It means that all changes made in the provided block number are not included. /// /// Historical state provider reads the following tables: -/// - [tables::AccountHistory] +/// - [tables::AccountsHistory] /// - [tables::Bytecodes] -/// - [tables::StorageHistory] -/// - [tables::AccountChangeSet] -/// - [tables::StorageChangeSet] +/// - [tables::StoragesHistory] +/// - [tables::AccountChangeSets] +/// - [tables::StorageChangeSets] #[derive(Debug)] pub struct HistoricalStateProviderRef<'b, TX: DbTx> { /// Transaction @@ -63,7 +63,7 @@ impl<'b, TX: DbTx> HistoricalStateProviderRef<'b, TX> { Self { tx, block_number, lowest_available_blocks } } - /// Lookup an account in the AccountHistory table + /// Lookup an account in the AccountsHistory table pub fn account_history_lookup(&self, address: Address) -> ProviderResult { if !self.lowest_available_blocks.is_account_history_available(self.block_number) { return Err(ProviderError::StateAtBlockPruned(self.block_number)) @@ -71,14 +71,14 @@ impl<'b, TX: DbTx> HistoricalStateProviderRef<'b, TX> { // history key to search IntegerList of block number changesets. 
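// Each AccountsHistory shard is keyed by (address, highest_block_number) and stores the IntegerList of blocks at which the account changed, so seeking ShardedKey::new(address, block_number) lands on the first shard that can contain this block; the `key.key == address` predicate passed to `history_info` below keeps the cursor from walking into a neighboring address' shards.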
let history_key = ShardedKey::new(address, self.block_number); - self.history_info::( + self.history_info::( history_key, |key| key.key == address, self.lowest_available_blocks.account_history_block_number, ) } - /// Lookup a storage key in the StorageHistory table + /// Lookup a storage key in the StoragesHistory table pub fn storage_history_lookup( &self, address: Address, @@ -90,7 +90,7 @@ impl<'b, TX: DbTx> HistoricalStateProviderRef<'b, TX> { // history key to search IntegerList of block number changesets. let history_key = StorageShardedKey::new(address, storage_key, self.block_number); - self.history_info::( + self.history_info::( history_key, |key| key.address == address && key.sharded_key.key == storage_key, self.lowest_available_blocks.storage_history_block_number, @@ -193,7 +193,7 @@ impl<'b, TX: DbTx> AccountReader for HistoricalStateProviderRef<'b, TX> { HistoryInfo::NotYetWritten => Ok(None), HistoryInfo::InChangeset(changeset_block_number) => Ok(self .tx - .cursor_dup_read::()? + .cursor_dup_read::()? .seek_by_key_subkey(changeset_block_number, address)? .filter(|acc| acc.address == address) .ok_or(ProviderError::AccountChangesetNotFound { @@ -262,7 +262,7 @@ impl<'b, TX: DbTx> StateProvider for HistoricalStateProviderRef<'b, TX> { HistoryInfo::NotYetWritten => Ok(None), HistoryInfo::InChangeset(changeset_block_number) => Ok(Some( self.tx - .cursor_dup_read::()? + .cursor_dup_read::()? .seek_by_key_subkey((changeset_block_number, address).into(), storage_key)? .filter(|entry| entry.key == storage_key) .ok_or_else(|| ProviderError::StorageChangesetNotFound { @@ -403,17 +403,17 @@ mod tests { let db = create_test_rw_db(); let tx = db.tx_mut().unwrap(); - tx.put::( + tx.put::( ShardedKey { key: ADDRESS, highest_block_number: 7 }, BlockNumberList::new([1, 3, 7]).unwrap(), ) .unwrap(); - tx.put::( + tx.put::( ShardedKey { key: ADDRESS, highest_block_number: u64::MAX }, BlockNumberList::new([10, 15]).unwrap(), ) .unwrap(); - tx.put::( + tx.put::( ShardedKey { key: HIGHER_ADDRESS, highest_block_number: u64::MAX }, BlockNumberList::new([4]).unwrap(), ) @@ -428,29 +428,29 @@ mod tests { let higher_acc_plain = Account { nonce: 4, balance: U256::ZERO, bytecode_hash: None }; // setup - tx.put::(1, AccountBeforeTx { address: ADDRESS, info: None }) + tx.put::(1, AccountBeforeTx { address: ADDRESS, info: None }) .unwrap(); - tx.put::( + tx.put::( 3, AccountBeforeTx { address: ADDRESS, info: Some(acc_at3) }, ) .unwrap(); - tx.put::( + tx.put::( 4, AccountBeforeTx { address: HIGHER_ADDRESS, info: None }, ) .unwrap(); - tx.put::( + tx.put::( 7, AccountBeforeTx { address: ADDRESS, info: Some(acc_at7) }, ) .unwrap(); - tx.put::( + tx.put::( 10, AccountBeforeTx { address: ADDRESS, info: Some(acc_at10) }, ) .unwrap(); - tx.put::( + tx.put::( 15, AccountBeforeTx { address: ADDRESS, info: Some(acc_at15) }, ) @@ -510,7 +510,7 @@ mod tests { let db = create_test_rw_db(); let tx = db.tx_mut().unwrap(); - tx.put::( + tx.put::( StorageShardedKey { address: ADDRESS, sharded_key: ShardedKey { key: STORAGE, highest_block_number: 7 }, @@ -518,7 +518,7 @@ mod tests { BlockNumberList::new([3, 7]).unwrap(), ) .unwrap(); - tx.put::( + tx.put::( StorageShardedKey { address: ADDRESS, sharded_key: ShardedKey { key: STORAGE, highest_block_number: u64::MAX }, @@ -526,7 +526,7 @@ mod tests { BlockNumberList::new([10, 15]).unwrap(), ) .unwrap(); - tx.put::( + tx.put::( StorageShardedKey { address: HIGHER_ADDRESS, sharded_key: ShardedKey { key: STORAGE, highest_block_number: u64::MAX }, @@ -544,11 +544,11 @@ mod tests 
{ let entry_at3 = StorageEntry { key: STORAGE, value: U256::from(0) }; // setup - tx.put::((3, ADDRESS).into(), entry_at3).unwrap(); - tx.put::((4, HIGHER_ADDRESS).into(), higher_entry_at4).unwrap(); - tx.put::((7, ADDRESS).into(), entry_at7).unwrap(); - tx.put::((10, ADDRESS).into(), entry_at10).unwrap(); - tx.put::((15, ADDRESS).into(), entry_at15).unwrap(); + tx.put::((3, ADDRESS).into(), entry_at3).unwrap(); + tx.put::((4, HIGHER_ADDRESS).into(), higher_entry_at4).unwrap(); + tx.put::((7, ADDRESS).into(), entry_at7).unwrap(); + tx.put::((10, ADDRESS).into(), entry_at10).unwrap(); + tx.put::((15, ADDRESS).into(), entry_at15).unwrap(); // setup plain state tx.put::(ADDRESS, entry_plain).unwrap(); diff --git a/crates/storage/provider/src/test_utils/blocks.rs b/crates/storage/provider/src/test_utils/blocks.rs index f7872449295..0d0aef625e2 100644 --- a/crates/storage/provider/src/test_utils/blocks.rs +++ b/crates/storage/provider/src/test_utils/blocks.rs @@ -26,7 +26,10 @@ pub fn assert_genesis_block(provider: &DatabaseProviderRW, g: assert_eq!(tx.table::().unwrap(), vec![(h, n)]); assert_eq!(tx.table::().unwrap(), vec![(n, h)]); - assert_eq!(tx.table::().unwrap(), vec![(n, g.difficulty.into())]); + assert_eq!( + tx.table::().unwrap(), + vec![(n, g.difficulty.into())] + ); assert_eq!( tx.table::().unwrap(), vec![(0, StoredBlockBodyIndices::default())] @@ -34,23 +37,23 @@ pub fn assert_genesis_block(provider: &DatabaseProviderRW, g: assert_eq!(tx.table::().unwrap(), vec![]); assert_eq!(tx.table::().unwrap(), vec![]); assert_eq!(tx.table::().unwrap(), vec![]); - assert_eq!(tx.table::().unwrap(), vec![]); - assert_eq!(tx.table::().unwrap(), vec![]); + assert_eq!(tx.table::().unwrap(), vec![]); + assert_eq!(tx.table::().unwrap(), vec![]); assert_eq!(tx.table::().unwrap(), vec![]); assert_eq!(tx.table::().unwrap(), vec![]); assert_eq!(tx.table::().unwrap(), vec![]); - assert_eq!(tx.table::().unwrap(), vec![]); - assert_eq!(tx.table::().unwrap(), vec![]); + assert_eq!(tx.table::().unwrap(), vec![]); + assert_eq!(tx.table::().unwrap(), vec![]); // TODO check after this gets done: https://github.com/paradigmxyz/reth/issues/1588 // Bytecodes are not reverted assert_eq!(tx.table::().unwrap(), vec![]); - assert_eq!(tx.table::().unwrap(), vec![]); - assert_eq!(tx.table::().unwrap(), vec![]); - assert_eq!(tx.table::().unwrap(), vec![]); - assert_eq!(tx.table::().unwrap(), vec![]); + assert_eq!(tx.table::().unwrap(), vec![]); + assert_eq!(tx.table::().unwrap(), vec![]); + assert_eq!(tx.table::().unwrap(), vec![]); + assert_eq!(tx.table::().unwrap(), vec![]); assert_eq!(tx.table::().unwrap(), vec![]); assert_eq!(tx.table::().unwrap(), vec![]); - assert_eq!(tx.table::().unwrap(), vec![]); - // SyncStage is not updated in tests + assert_eq!(tx.table::().unwrap(), vec![]); + // StageCheckpoints is not updated in tests } const BLOCK_RLP: [u8; 610] = 
hex!("f9025ff901f7a0c86e8cc0310ae7c531c758678ddbfd16fc51c8cef8cec650b032de9869e8b94fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa050554882fbbda2c2fd93fdc466db9946ea262a67f7a76cc169e714f105ab583da00967f09ef1dfed20c0eacfaa94d5cd4002eda3242ac47eae68972d07b106d192a0e3c8b47fbfc94667ef4cceb17e5cc21e3b1eebd442cebb27f07562b33836290db90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302000001830f42408238108203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f862f860800a83061a8094095e7baea6a6c7c4c2dfeb977efac326af552d8780801ba072ed817487b84ba367d15d2f039b5fc5f087d0a8882fbdf73e8cb49357e1ce30a0403d800545b8fc544f92ce8124e2255f8c3c6af93f28243a120585d4c4c6a2a3c0"); diff --git a/crates/storage/provider/src/traits/hashing.rs b/crates/storage/provider/src/traits/hashing.rs index b5a77247ed1..7978a4b1940 100644 --- a/crates/storage/provider/src/traits/hashing.rs +++ b/crates/storage/provider/src/traits/hashing.rs @@ -20,7 +20,7 @@ pub trait HashingWriter: Send + Sync { range: RangeInclusive, ) -> ProviderResult>>; - /// Inserts all accounts into [reth_db::tables::AccountHistory] table. + /// Inserts all accounts into [reth_db::tables::AccountsHistory] table. /// /// # Returns /// diff --git a/crates/transaction-pool/src/blobstore/disk.rs b/crates/transaction-pool/src/blobstore/disk.rs index e9ad2fcaa54..d6120179a63 100644 --- a/crates/transaction-pool/src/blobstore/disk.rs +++ b/crates/transaction-pool/src/blobstore/disk.rs @@ -105,14 +105,14 @@ impl BlobStore for DiskFileBlobStore { txs: Vec, ) -> Result, BlobStoreError> { if txs.is_empty() { - return Ok(Vec::new()); + return Ok(Vec::new()) } self.inner.get_all(txs) } fn get_exact(&self, txs: Vec) -> Result, BlobStoreError> { if txs.is_empty() { - return Ok(Vec::new()); + return Ok(Vec::new()) } self.inner.get_exact(txs) } @@ -213,7 +213,7 @@ impl DiskFileBlobStoreInner { /// Returns true if the blob for the given transaction hash is in the blob cache or on disk. fn contains(&self, tx: B256) -> Result { if self.blob_cache.lock().get(&tx).is_some() { - return Ok(true); + return Ok(true) } // we only check if the file exists and assume it's valid Ok(self.blob_disk_file(tx).is_file()) @@ -222,7 +222,7 @@ impl DiskFileBlobStoreInner { /// Retrieves the blob for the given transaction hash from the blob cache or disk. 
fn get_one(&self, tx: B256) -> Result, BlobStoreError> { if let Some(blob) = self.blob_cache.lock().get(&tx) { - return Ok(Some(blob.clone())); + return Ok(Some(blob.clone())) } let blob = self.read_one(tx)?; if let Some(blob) = &blob { @@ -321,11 +321,11 @@ impl DiskFileBlobStoreInner { } } if cache_miss.is_empty() { - return Ok(res); + return Ok(res) } let from_disk = self.read_many_decoded(cache_miss); if from_disk.is_empty() { - return Ok(res); + return Ok(res) } let mut cache = self.blob_cache.lock(); for (tx, data) in from_disk { diff --git a/crates/trie/src/hashed_cursor/default.rs b/crates/trie/src/hashed_cursor/default.rs index d49feedd184..298c5ce2e75 100644 --- a/crates/trie/src/hashed_cursor/default.rs +++ b/crates/trie/src/hashed_cursor/default.rs @@ -7,21 +7,21 @@ use reth_db::{ use reth_primitives::{Account, StorageEntry, B256}; impl<'a, TX: DbTx> HashedCursorFactory for &'a TX { - type AccountCursor = ::Cursor; - type StorageCursor = ::DupCursor; + type AccountCursor = ::Cursor; + type StorageCursor = ::DupCursor; fn hashed_account_cursor(&self) -> Result { - self.cursor_read::() + self.cursor_read::() } fn hashed_storage_cursor(&self) -> Result { - self.cursor_dup_read::() + self.cursor_dup_read::() } } impl HashedAccountCursor for C where - C: DbCursorRO, + C: DbCursorRO, { fn seek(&mut self, key: B256) -> Result, reth_db::DatabaseError> { self.seek(key) @@ -34,7 +34,7 @@ where impl HashedStorageCursor for C where - C: DbCursorRO + DbDupCursorRO, + C: DbCursorRO + DbDupCursorRO, { fn is_storage_empty(&mut self, key: B256) -> Result { Ok(self.seek_exact(key)?.is_none()) diff --git a/crates/trie/src/hashed_cursor/post_state.rs b/crates/trie/src/hashed_cursor/post_state.rs index 8f1ec137eaf..7c9e048cb84 100644 --- a/crates/trie/src/hashed_cursor/post_state.rs +++ b/crates/trie/src/hashed_cursor/post_state.rs @@ -440,7 +440,7 @@ mod tests { let db = create_test_rw_db(); db.update(|tx| { for (key, account) in accounts.iter() { - tx.put::(*key, *account).unwrap(); + tx.put::(*key, *account).unwrap(); } }) .unwrap(); @@ -460,7 +460,7 @@ mod tests { let db = create_test_rw_db(); db.update(|tx| { for (key, account) in accounts.iter().filter(|x| x.0[31] % 2 == 0) { - tx.put::(*key, *account).unwrap(); + tx.put::(*key, *account).unwrap(); } }) .unwrap(); @@ -487,7 +487,7 @@ mod tests { let db = create_test_rw_db(); db.update(|tx| { for (key, account) in accounts.iter().filter(|x| x.0[31] % 2 == 0) { - tx.put::(*key, *account).unwrap(); + tx.put::(*key, *account).unwrap(); } }) .unwrap(); @@ -517,7 +517,7 @@ mod tests { db.update(|tx| { for (key, _) in accounts.iter() { // insert zero value accounts to the database - tx.put::(*key, Account::default()).unwrap(); + tx.put::(*key, Account::default()).unwrap(); } }) .unwrap(); @@ -539,7 +539,7 @@ mod tests { let db = create_test_rw_db(); db.update(|tx| { for (key, account) in db_accounts.iter() { - tx.put::(*key, *account).unwrap(); + tx.put::(*key, *account).unwrap(); } }) .unwrap(); @@ -586,7 +586,7 @@ mod tests { db.update(|tx| { for (slot, value) in db_storage.iter() { // insert zero value accounts to the database - tx.put::( + tx.put::( address, StorageEntry { key: *slot, value: *value }, ) @@ -664,7 +664,7 @@ mod tests { db.update(|tx| { for (slot, value) in db_storage.iter() { // insert zero value accounts to the database - tx.put::( + tx.put::( address, StorageEntry { key: *slot, value: *value }, ) @@ -703,7 +703,7 @@ mod tests { db.update(|tx| { for (slot, value) in db_storage { // insert zero value accounts to the database - 
tx.put::(address, StorageEntry { key: slot, value }) + tx.put::(address, StorageEntry { key: slot, value }) .unwrap(); } }) @@ -741,7 +741,7 @@ mod tests { db.update(|tx| { for (slot, value) in db_storage { // insert zero value accounts to the database - tx.put::(address, StorageEntry { key: slot, value }) + tx.put::(address, StorageEntry { key: slot, value }) .unwrap(); } }) @@ -773,7 +773,7 @@ mod tests { db.update(|tx| { for (slot, _) in storage.iter() { // insert zero value accounts to the database - tx.put::( + tx.put::( address, StorageEntry { key: *slot, value: U256::ZERO }, ) @@ -811,7 +811,7 @@ mod tests { for (address, storage) in db_storages.iter() { for (slot, value) in storage { let entry = StorageEntry { key: *slot, value: *value }; - tx.put::(*address, entry).unwrap(); + tx.put::(*address, entry).unwrap(); } } }) diff --git a/crates/trie/src/prefix_set/loader.rs b/crates/trie/src/prefix_set/loader.rs index 392c7bef840..10fbbadd469 100644 --- a/crates/trie/src/prefix_set/loader.rs +++ b/crates/trie/src/prefix_set/loader.rs @@ -33,7 +33,7 @@ impl<'a, TX: DbTx> PrefixSetLoader<'a, TX> { let mut destroyed_accounts = HashSet::default(); // Walk account changeset and insert account prefixes. - let mut account_changeset_cursor = self.cursor_read::()?; + let mut account_changeset_cursor = self.cursor_read::()?; let mut account_plain_state_cursor = self.cursor_read::()?; for account_entry in account_changeset_cursor.walk_range(range.clone())? { let (_, AccountBeforeTx { address, .. }) = account_entry?; @@ -47,7 +47,7 @@ impl<'a, TX: DbTx> PrefixSetLoader<'a, TX> { // Walk storage changeset and insert storage prefixes as well as account prefixes if missing // from the account prefix set. - let mut storage_cursor = self.cursor_dup_read::()?; + let mut storage_cursor = self.cursor_dup_read::()?; let storage_range = BlockNumberAddress::range(range); for storage_entry in storage_cursor.walk_range(storage_range)? { let (BlockNumberAddress((_, address)), StorageEntry { key, .. }) = storage_entry?; diff --git a/crates/trie/src/state.rs b/crates/trie/src/state.rs index 1aff3b2543f..8d9aead3d5d 100644 --- a/crates/trie/src/state.rs +++ b/crates/trie/src/state.rs @@ -65,7 +65,7 @@ impl HashedPostState { ) -> Result { // Iterate over account changesets and record value before first occurring account change. let mut accounts = HashMap::>::default(); - let mut account_changesets_cursor = tx.cursor_read::()?; + let mut account_changesets_cursor = tx.cursor_read::()?; for entry in account_changesets_cursor.walk_range(range.clone())? { let (_, AccountBeforeTx { address, info }) = entry?; if let hash_map::Entry::Vacant(entry) = accounts.entry(address) { @@ -75,7 +75,7 @@ impl HashedPostState { // Iterate over storage changesets and record value before first occurring storage change. let mut storages = HashMap::>::default(); - let mut storage_changesets_cursor = tx.cursor_read::()?; + let mut storage_changesets_cursor = tx.cursor_read::()?; for entry in storage_changesets_cursor.walk_range(BlockNumberAddress::range(range))? 
{ let (BlockNumberAddress((_, address)), storage) = entry?; let account_storage = storages.entry(address).or_default(); diff --git a/crates/trie/src/trie.rs b/crates/trie/src/trie.rs index 0a34ccda3cd..9d51077ba11 100644 --- a/crates/trie/src/trie.rs +++ b/crates/trie/src/trie.rs @@ -498,13 +498,13 @@ mod tests { storage: &BTreeMap, ) { let hashed_address = keccak256(address); - tx.put::(hashed_address, account).unwrap(); + tx.put::(hashed_address, account).unwrap(); insert_storage(tx, hashed_address, storage); } fn insert_storage(tx: &impl DbTxMut, hashed_address: B256, storage: &BTreeMap) { for (k, v) in storage { - tx.put::( + tx.put::( hashed_address, StorageEntry { key: keccak256(k), value: *v }, ) @@ -518,7 +518,7 @@ mod tests { let hashed_address = B256::with_last_byte(1); let mut hashed_storage_cursor = - tx.tx_ref().cursor_dup_write::().unwrap(); + tx.tx_ref().cursor_dup_write::().unwrap(); let data = inputs.iter().map(|x| B256::from_str(x).unwrap()); let value = U256::from(0); for key in data { @@ -581,7 +581,7 @@ mod tests { let factory = create_test_provider_factory(); let tx = factory.provider_rw().unwrap(); for (key, value) in &storage { - tx.tx_ref().put::( + tx.tx_ref().put::( hashed_address, StorageEntry { key: keccak256(key), value: *value }, ) @@ -777,7 +777,7 @@ mod tests { ); let mut hashed_storage_cursor = - tx.tx_ref().cursor_dup_write::().unwrap(); + tx.tx_ref().cursor_dup_write::().unwrap(); for (hashed_slot, value) in storage.clone() { hashed_storage_cursor.upsert(key3, StorageEntry { key: hashed_slot, value }).unwrap(); } @@ -806,9 +806,9 @@ mod tests { let tx = factory.provider_rw().unwrap(); let mut hashed_account_cursor = - tx.tx_ref().cursor_write::().unwrap(); + tx.tx_ref().cursor_write::().unwrap(); let mut hashed_storage_cursor = - tx.tx_ref().cursor_dup_write::().unwrap(); + tx.tx_ref().cursor_dup_write::().unwrap(); let mut hash_builder = HashBuilder::default(); @@ -1002,7 +1002,7 @@ mod tests { { let mut hashed_account_cursor = - tx.tx_ref().cursor_write::().unwrap(); + tx.tx_ref().cursor_write::().unwrap(); let account = hashed_account_cursor.seek_exact(key2).unwrap().unwrap(); hashed_account_cursor.delete_current().unwrap(); @@ -1059,7 +1059,7 @@ mod tests { let tx = factory.provider_rw().unwrap(); { let mut hashed_account_cursor = - tx.tx_ref().cursor_write::().unwrap(); + tx.tx_ref().cursor_write::().unwrap(); let account2 = hashed_account_cursor.seek_exact(key2).unwrap().unwrap(); hashed_account_cursor.delete_current().unwrap(); @@ -1172,7 +1172,7 @@ mod tests { tokio::runtime::Runtime::new().unwrap().block_on(async { let factory = create_test_provider_factory(); let tx = factory.provider_rw().unwrap(); - let mut hashed_account_cursor = tx.tx_ref().cursor_write::().unwrap(); + let mut hashed_account_cursor = tx.tx_ref().cursor_write::().unwrap(); let mut state = BTreeMap::default(); for accounts in account_changes { @@ -1234,7 +1234,7 @@ mod tests { ) -> (B256, HashMap) { let value = U256::from(1); - let mut hashed_storage = tx.tx_ref().cursor_write::().unwrap(); + let mut hashed_storage = tx.tx_ref().cursor_write::().unwrap(); let mut hb = HashBuilder::default().with_updates(true); @@ -1262,7 +1262,7 @@ mod tests { Account { nonce: 0, balance: U256::from(1u64), bytecode_hash: Some(B256::random()) }; let val = encode_account(a, None); - let mut hashed_accounts = tx.tx_ref().cursor_write::().unwrap(); + let mut hashed_accounts = tx.tx_ref().cursor_write::().unwrap(); let mut hb = HashBuilder::default(); for key in [ diff --git a/docs/crates/db.md 
b/docs/crates/db.md index cf0161d2b5c..b08383b7a6d 100644 --- a/docs/crates/db.md +++ b/docs/crates/db.md @@ -35,30 +35,30 @@ The `Table` trait has two generic values, `Key` and `Value`, which need to imple There are many tables within the node, all used to store different types of data from `Headers` to `Transactions` and more. Below is a list of all of the tables. You can follow [this link](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/storage/db/src/tables/mod.rs#L161-L188) if you would like to see the table definitions for any of the tables below. - CanonicalHeaders -- HeaderTD +- HeaderTerminalDifficulties - HeaderNumbers - Headers - BlockBodyIndices - BlockOmmers - BlockWithdrawals -- TransactionBlock +- TransactionBlocks - Transactions -- TxHashNumber +- TransactionHashNumbers - Receipts - PlainAccountState - PlainStorageState - Bytecodes -- AccountHistory -- StorageHistory -- AccountChangeSet -- StorageChangeSet +- AccountsHistory +- StoragesHistory +- AccountChangeSets +- StorageChangeSets -- HashedAccount -- HashedStorage +- HashedAccounts +- HashedStorages - AccountsTrie - StoragesTrie -- TxSenders -- SyncStage -- SyncStageProgress +- TransactionSenders +- StageCheckpoints +- StageCheckpointProgresses - PruneCheckpoints
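To make the `Key`/`Value` pairing behind this list concrete, here is a minimal sketch of a table definition under the new names. The `Table` trait below is a simplified stand-in for the one in `reth_db` (the real crate declares its tables through macros, with additional encoding bounds on keys and values), so the exact bounds and key/value types are illustrative assumptions:

```rust
/// Simplified stand-in for reth_db's `Table` trait -- illustrative only.
pub trait Table: Send + Sync + 'static {
    /// Name under which the table is stored.
    const NAME: &'static str;
    /// Key the table is sorted by.
    type Key;
    /// Value stored under the key.
    type Value;
}

/// Maps a transaction hash to its sequential `TxNumber`
/// (the table formerly named `TxHashNumber`).
pub struct TransactionHashNumbers;

impl Table for TransactionHashNumbers {
    const NAME: &'static str = "TransactionHashNumbers";
    type Key = [u8; 32]; // B256 transaction hash
    type Value = u64; // TxNumber
}

fn main() {
    // Generic code can address a table purely through the trait:
    fn table_name<T: Table>() -> &'static str {
        T::NAME
    }
    assert_eq!(table_name::<TransactionHashNumbers>(), "TransactionHashNumbers");
}
```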
@@ -137,7 +137,6 @@ The `Database` defines two associated types `TX` and `TXMut`. [File: crates/storage/db/src/abstraction/database.rs](https://github.com/paradigmxyz/reth/blob/main/crates/storage/db/src/abstraction/database.rs#L11) - The `TX` type can be any type that implements the `DbTx` trait, which provides a set of functions to interact with read only transactions. [File: crates/storage/db/src/abstraction/transaction.rs](https://github.com/paradigmxyz/reth/blob/main/crates/storage/db/src/abstraction/transaction.rs#L36) @@ -149,7 +148,7 @@ pub trait DbTx: Send + Sync { type Cursor: DbCursorRO + Send + Sync; /// DupCursor type for this read-only transaction type DupCursor: DbDupCursorRO + DbCursorRO + Send + Sync; - + /// Get value fn get(&self, key: T::Key) -> Result, Error>; /// Commit for read only transaction will consume and free transaction and allows diff --git a/docs/design/database.md b/docs/design/database.md index 42ec8ba5603..db5da983f51 100644 --- a/docs/design/database.md +++ b/docs/design/database.md @@ -2,24 +2,24 @@ ## Abstractions -* We created a [Database trait abstraction](https://github.com/paradigmxyz/reth/blob/0d9b9a392d4196793736522f3fc2ac804991b45d/crates/interfaces/src/db/mod.rs) using Rust Stable GATs which frees us from being bound to a single database implementation. We currently use MDBX, but are exploring [redb](https://github.com/cberner/redb) as an alternative. -* We then iterated on [`Transaction`](https://github.com/paradigmxyz/reth/blob/0d9b9a392d4196793736522f3fc2ac804991b45d/crates/stages/src/db.rs#L14-L19) as a non-leaky abstraction with helpers for strictly-typed and unit-tested higher-level database abstractions. +- We created a [Database trait abstraction](https://github.com/paradigmxyz/reth/blob/0d9b9a392d4196793736522f3fc2ac804991b45d/crates/interfaces/src/db/mod.rs) using Rust Stable GATs which frees us from being bound to a single database implementation. We currently use MDBX, but are exploring [redb](https://github.com/cberner/redb) as an alternative. +- We then iterated on [`Transaction`](https://github.com/paradigmxyz/reth/blob/0d9b9a392d4196793736522f3fc2ac804991b45d/crates/stages/src/db.rs#L14-L19) as a non-leaky abstraction with helpers for strictly-typed and unit-tested higher-level database abstractions. ## Codecs -* We want Reth's serialized format to be able to trade off read/write speed for size, depending on who the user is. -* To achieve that, we created the [Encode/Decode/Compress/Decompress traits](https://github.com/paradigmxyz/reth/blob/0d9b9a392d4196793736522f3fc2ac804991b45d/crates/interfaces/src/db/table.rs#L9-L36) to make the (de)serialization of database `Table::Key` and `Table::Values` generic. - * This allows for [out-of-the-box benchmarking](https://github.com/paradigmxyz/reth/blob/0d9b9a392d4196793736522f3fc2ac804991b45d/crates/db/benches/encoding_iai.rs#L5) (using [Criterion](https://github.com/bheisler/criterion.rs) and [Iai](https://github.com/bheisler/iai)) - * It also enables [out-of-the-box fuzzing](https://github.com/paradigmxyz/reth/blob/0d9b9a392d4196793736522f3fc2ac804991b45d/crates/interfaces/src/db/codecs/fuzz/mod.rs) using [trailofbits/test-fuzz](https://github.com/trailofbits/test-fuzz). 
-* We implemented that trait for the following encoding formats: - * [Ethereum-specific Compact Encoding](https://github.com/paradigmxyz/reth/blob/0d9b9a392d4196793736522f3fc2ac804991b45d/crates/codecs/derive/src/compact/mod.rs): A lot of Ethereum datatypes have unnecessary zeros when serialized, or optional (e.g. on empty hashes) which would be nice not to pay in storage costs. - * [Erigon](https://github.com/ledgerwatch/erigon/blob/12ee33a492f5d240458822d052820d9998653a63/docs/programmers_guide/db_walkthrough.MD) achieves that by having a `bitfield` set on Table "PlainState which adds a bitfield to Accounts. - * Akula expanded it for other tables and datatypes manually. It also saved some more space by storing the length of certain types (U256, u64) using the [`modular_bitfield`](https://docs.rs/modular-bitfield/latest/modular_bitfield/) crate, which compacts this information. - * We generalized it for all types, by writing a derive macro that autogenerates code for implementing the trait. It, also generates the interfaces required for fuzzing using ToB/test-fuzz: - * [Scale Encoding](https://github.com/paritytech/parity-scale-codec) - * [Postcard Encoding](https://github.com/jamesmunns/postcard) - * Passthrough (called `no_codec` in the codebase) -* We made implementation of these traits easy via a derive macro called [`main_codec`](https://github.com/paradigmxyz/reth/blob/0d9b9a392d4196793736522f3fc2ac804991b45d/crates/codecs/derive/src/lib.rs#L15) that delegates to one of Compact (default), Scale, Postcard or Passthrough encoding. This is [derived on every struct we need](https://github.com/search?q=repo%3Aparadigmxyz%2Freth%20%22%23%5Bmain_codec%5D%22&type=code), and lets us experiment with different encoding formats without having to modify the entire codebase each time. +- We want Reth's serialized format to be able to trade off read/write speed for size, depending on who the user is. +- To achieve that, we created the [Encode/Decode/Compress/Decompress traits](https://github.com/paradigmxyz/reth/blob/0d9b9a392d4196793736522f3fc2ac804991b45d/crates/interfaces/src/db/table.rs#L9-L36) to make the (de)serialization of database `Table::Key` and `Table::Values` generic. + - This allows for [out-of-the-box benchmarking](https://github.com/paradigmxyz/reth/blob/0d9b9a392d4196793736522f3fc2ac804991b45d/crates/db/benches/encoding_iai.rs#L5) (using [Criterion](https://github.com/bheisler/criterion.rs) and [Iai](https://github.com/bheisler/iai)) + - It also enables [out-of-the-box fuzzing](https://github.com/paradigmxyz/reth/blob/0d9b9a392d4196793736522f3fc2ac804991b45d/crates/interfaces/src/db/codecs/fuzz/mod.rs) using [trailofbits/test-fuzz](https://github.com/trailofbits/test-fuzz).
+ - [Erigon](https://github.com/ledgerwatch/erigon/blob/12ee33a492f5d240458822d052820d9998653a63/docs/programmers_guide/db_walkthrough.MD) achieves that by having a `bitfield` set on the "PlainState" table, which adds a bitfield to Accounts. + - Akula expanded it for other tables and datatypes manually. It also saved some more space by storing the length of certain types (U256, u64) using the [`modular_bitfield`](https://docs.rs/modular-bitfield/latest/modular_bitfield/) crate, which compacts this information. + - We generalized it for all types by writing a derive macro that autogenerates code for implementing the trait. It also generates the interfaces required for fuzzing using ToB/test-fuzz: + - [Scale Encoding](https://github.com/paritytech/parity-scale-codec) + - [Postcard Encoding](https://github.com/jamesmunns/postcard) + - Passthrough (called `no_codec` in the codebase) +- We made implementation of these traits easy via a derive macro called [`main_codec`](https://github.com/paradigmxyz/reth/blob/0d9b9a392d4196793736522f3fc2ac804991b45d/crates/codecs/derive/src/lib.rs#L15) that delegates to one of Compact (default), Scale, Postcard or Passthrough encoding. This is [derived on every struct we need](https://github.com/search?q=repo%3Aparadigmxyz%2Freth%20%22%23%5Bmain_codec%5D%22&type=code), and lets us experiment with different encoding formats without having to modify the entire codebase each time. ### Table layout @@ -58,11 +58,11 @@ Transactions { u64 TxNumber "PK" TransactionSignedNoHash Data } -TxHashNumber { +TransactionHashNumbers { B256 TxHash "PK" u64 TxNumber } -TransactionBlock { +TransactionBlocks { u64 MaxTxNumber "PK" u64 BlockNumber } @@ -83,31 +83,31 @@ PlainStorageState { B256 StorageKey "PK" U256 StorageValue } -AccountHistory { +AccountsHistory { B256 Account "PK" BlockNumberList BlockNumberList "List of transitions where account was changed" } -StorageHistory { +StoragesHistory { B256 Account "PK" B256 StorageKey "PK" BlockNumberList BlockNumberList "List of transitions where account storage entry was changed" } -AccountChangeSet { +AccountChangeSets { u64 BlockNumber "PK" B256 Account "PK" - ChangeSet AccountChangeSet "Account before transition" + ChangeSet AccountChangeSets "Account before transition" } -StorageChangeSet { +StorageChangeSets { u64 BlockNumber "PK" B256 Account "PK" B256 StorageKey "PK" - ChangeSet StorageChangeSet "Storage entry before transition" + ChangeSet StorageChangeSets "Storage entry before transition" } -HashedAccount { +HashedAccounts { B256 HashedAddress "PK" Account Data } -HashedStorage { +HashedStorages { B256 HashedAddress "PK" B256 HashedStorageKey "PK" U256 StorageValue @@ -121,17 +121,17 @@ StoragesTrie { StoredNibblesSubKey NibblesSubKey "PK" StorageTrieEntry Node } -TxSenders { +TransactionSenders { u64 TxNumber "PK" Address Sender } -TxHashNumber ||--|| Transactions : "hash -> tx id" -TransactionBlock ||--|{ Transactions : "tx id -> block number" +TransactionHashNumbers ||--|| Transactions : "hash -> tx id" +TransactionBlocks ||--|{ Transactions : "tx id -> block number" BlockBodyIndices ||--o{ Transactions : "block number -> tx ids" -Headers ||--o{ AccountChangeSet : "each block has zero or more changesets" -Headers ||--o{ StorageChangeSet : "each block has zero or more changesets" -AccountHistory }|--|{ AccountChangeSet : index -StorageHistory }|--|{ StorageChangeSet : index +Headers ||--o{ AccountChangeSets : "each block has zero or more
changesets" +Headers ||--o{ StorageChangeSets : "each block has zero or more changesets" +AccountsHistory }|--|{ AccountChangeSets : index +StoragesHistory }|--|{ StorageChangeSets : index Headers ||--o| BlockOmmers : "each block has 0 or more ommers" BlockBodyIndices ||--|| Headers : "index" HeaderNumbers |o--|| Headers : "block hash -> block number" @@ -139,8 +139,8 @@ CanonicalHeaders |o--|| Headers : "canonical chain block number -> block hash" Transactions ||--|| Receipts : "each tx has a receipt" PlainAccountState }o--o| Bytecodes : "an account can have a bytecode" PlainAccountState ||--o{ PlainStorageState : "an account has 0 or more storage slots" -Transactions ||--|| TxSenders : "a tx has exactly 1 sender" +Transactions ||--|| TransactionSenders : "a tx has exactly 1 sender" -PlainAccountState ||--|| HashedAccount : "hashed representation" -PlainStorageState ||--|| HashedStorage : "hashed representation" +PlainAccountState ||--|| HashedAccounts : "hashed representation" +PlainStorageState ||--|| HashedStorages : "hashed representation" ``` diff --git a/testing/ef-tests/src/models.rs b/testing/ef-tests/src/models.rs index 992cd40a1b1..2dddc540c45 100644 --- a/testing/ef-tests/src/models.rs +++ b/testing/ef-tests/src/models.rs @@ -161,7 +161,7 @@ impl State { bytecode_hash: code_hash, }; tx.put::(address, reth_account)?; - tx.put::(hashed_address, reth_account)?; + tx.put::(hashed_address, reth_account)?; if let Some(code_hash) = code_hash { tx.put::(code_hash, Bytecode::new_raw(account.code.clone()))?; } @@ -171,7 +171,7 @@ impl State { address, StorageEntry { key: storage_key, value: *v }, )?; - tx.put::( + tx.put::( hashed_address, StorageEntry { key: keccak256(storage_key), value: *v }, )